// eathread_atomic_x86.h
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) Electronic Arts Inc. All rights reserved.
///////////////////////////////////////////////////////////////////////////////

#if defined(EA_PRAGMA_ONCE_SUPPORTED)
	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
#endif

/////////////////////////////////////////////////////////////////////////////
// Defines functionality for threadsafe primitive operations.
/////////////////////////////////////////////////////////////////////////////

#ifndef EATHREAD_X86_EATHREAD_ATOMIC_X86_H
#define EATHREAD_X86_EATHREAD_ATOMIC_X86_H

#include <EABase/eabase.h>
#include <stddef.h>
#include <eathread/internal/eathread_atomic_standalone.h>

// 4146: unary minus operator applied to unsigned type, result still unsigned
// 4339: use of undefined type detected in CLR meta-data
EA_DISABLE_VC_WARNING(4146 4339)

// This is required for Windows Phone (ARM) because we are temporarily not using
// C++11 style atomics and we are depending on the MSVC intrinsics.
#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_ARM)
#define EA_THREAD_ATOMIC_IMPLEMENTED

namespace EA
{
namespace Thread
{
  26. /// class AtomicInt
  27. /// Actual implementation may vary per platform. May require certain alignments, sizes,
  28. /// and declaration specifications per platform.
  29. template <class T>
  30. class AtomicInt
  31. {
  32. public:
  33. typedef AtomicInt<T> ThisType;
  34. typedef T ValueType;
  35. /// AtomicInt
  36. /// Empty constructor. Intentionally leaves mValue in an unspecified state.
  37. /// This is done so that an AtomicInt acts like a standard built-in integer.
  38. /// Problem: C/C++ has two ways to initialize a built-in type x: x and x(),
  39. /// and they have different semantics, as the first does nothing but
  40. /// the second initializes x to zero. C++ does not provide a means
  41. /// to tell which of tell which of these two ways a C++ class instance
  42. /// initialized. Thus we probably can't easily argue that this constructor
  43. /// should do nothing vs. initialize the variable to 0. It's probably
  44. /// safer for us to make it initialize to 0, and it wouldn't break
  45. /// users to do so, though it would add a tiny runtime cost.
  46. AtomicInt()
  47. {}
  48. AtomicInt(ValueType n) : mValue(0) // Initialize mValue because otherwise SetValue may read it before it's initialized.
  49. { SetValue(n); }
  50. AtomicInt(const ThisType& x)
  51. : mValue(x.GetValue()) {}
  52. AtomicInt& operator=(const ThisType& x)
  53. { mValue = x.GetValue(); return *this; }
  54. ValueType GetValue() const
  55. { return mValue; }
  56. ValueType GetValueRaw() const
  57. { return mValue; }
  58. ValueType SetValue(ValueType n);
  59. bool SetValueConditional(ValueType n, ValueType condition);
  60. ValueType Increment();
  61. ValueType Decrement();
  62. ValueType Add(ValueType n);
  63. // operators
  64. inline operator const ValueType() const { return GetValue(); }
  65. inline ValueType operator =(ValueType n) { SetValue(n); return n; }
  66. inline ValueType operator+=(ValueType n) { return Add(n);}
  67. inline ValueType operator-=(ValueType n) { return Add(-n);}
  68. inline ValueType operator++() { return Increment();}
  69. inline ValueType operator++(int) { return Increment() - 1;}
  70. inline ValueType operator--() { return Decrement(); }
  71. inline ValueType operator--(int) { return Decrement() + 1;}
  72. protected:
  73. volatile ValueType mValue;
  74. };
  75. #if defined(EA_PLATFORM_MICROSOFT) && defined(EA_COMPILER_MSVC)
  76. // 32 bit versions
  77. template<> inline
  78. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::SetValue(ValueType n)
  79. { return (ValueType)InterlockedExchangeImp((long*)&mValue, (long)n); } // Even though we shouldn't need to use InterlockedExchange on x86, the intrinsic x86 InterlockedExchange is at least as fast as C code we would otherwise put here.
  80. template<> inline
  81. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::SetValue(ValueType n)
  82. { return (ValueType)InterlockedExchangeImp((long*)&mValue, (long)n); } // Even though we shouldn't need to use InterlockedExchange on x86, the intrinsic x86 InterlockedExchange is at least as fast as C code we would otherwise put here.
  83. template<> inline
  84. bool AtomicInt<int32_t>::SetValueConditional(ValueType n, ValueType condition)
  85. { return ((ValueType)InterlockedCompareExchangeImp((long*)&mValue, (long)n, (long)condition) == condition); }
  86. template<> inline
  87. bool AtomicInt<uint32_t>::SetValueConditional(ValueType n, ValueType condition)
  88. { return ((ValueType)InterlockedCompareExchangeImp((long*)&mValue, (long)n, (long)condition) == condition); }
  89. template<> inline
  90. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Increment()
  91. { return (ValueType)InterlockedIncrementImp((long*)&mValue); }
  92. template<> inline
  93. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Increment()
  94. { return (ValueType)InterlockedIncrementImp((long*)&mValue); }
  95. template<> inline
  96. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Decrement()
  97. { return (ValueType)InterlockedDecrementImp((long*)&mValue); }
  98. template<> inline
  99. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Decrement()
  100. { return (ValueType)InterlockedDecrementImp((long*)&mValue); }
  101. template<> inline
  102. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Add(ValueType n)
  103. { return ((ValueType)InterlockedExchangeAddImp((long*)&mValue, (long)n) + n); }
  104. template<> inline
  105. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Add(ValueType n)
  106. { return ((ValueType)InterlockedExchangeAddImp((long*)&mValue, (long)n) + n); }
  107. // 64 bit versions
  108. template<> inline
  109. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::GetValue() const{
  110. int64_t condition, nNewValue;
  111. do{
  112. nNewValue = condition = mValue; // Todo: This function has a problem unless the assignment of mValue to condition is atomic.
  113. } while(!InterlockedSetIfEqual(const_cast<int64_t*>(&mValue), nNewValue, condition));
  114. return nNewValue;
  115. }
  116. template<> inline
  117. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::GetValue() const{
  118. uint64_t condition, nNewValue;
  119. do{
  120. nNewValue = condition = mValue; // Todo: This function has a problem unless the assignment of mValue to condition is atomic.
  121. } while(!InterlockedSetIfEqual(const_cast<uint64_t*>(&mValue), nNewValue, condition));
  122. return nNewValue;
  123. }
  124. template<> inline
  125. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::SetValue(ValueType n){
  126. int64_t condition;
  127. do{
  128. condition = mValue;
  129. } while(!InterlockedSetIfEqual(&mValue, n, condition));
  130. return condition;
  131. }
  132. template<> inline
  133. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::SetValue(ValueType n){
  134. uint64_t condition;
  135. do{
  136. condition = mValue;
  137. } while(!InterlockedSetIfEqual(&mValue, n, condition));
  138. return condition;
  139. }
  140. template<> inline
  141. bool AtomicInt<int64_t>::SetValueConditional(ValueType n, ValueType condition){
  142. return InterlockedSetIfEqual(&mValue, n, condition);
  143. }
  144. template<> inline
  145. bool AtomicInt<uint64_t>::SetValueConditional(ValueType n, ValueType condition){
  146. return InterlockedSetIfEqual(&mValue, n, condition);
  147. }
  148. template<> inline
  149. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Increment(){
  150. int64_t condition, nNewValue;
  151. do{
  152. condition = mValue;
  153. nNewValue = condition + 1;
  154. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  155. return nNewValue;
  156. }
  157. template<> inline
  158. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Increment(){
  159. uint64_t condition, nNewValue;
  160. do{
  161. condition = mValue;
  162. nNewValue = condition + 1;
  163. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  164. return nNewValue;
  165. }
  166. template<> inline
  167. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Decrement(){
  168. int64_t condition, nNewValue;
  169. do{
  170. condition = mValue;
  171. nNewValue = condition - 1;
  172. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  173. return nNewValue;
  174. }
  175. template<> inline
  176. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Decrement(){
  177. uint64_t condition, nNewValue;
  178. do{
  179. condition = mValue;
  180. nNewValue = condition - 1;
  181. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  182. return nNewValue;
  183. }
  184. template<> inline
  185. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Add(ValueType n){
  186. int64_t condition, nNewValue;
  187. do{
  188. condition = mValue;
  189. nNewValue = condition + n;
  190. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  191. return nNewValue;
  192. }
  193. template<> inline
  194. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Add(ValueType n){
  195. uint64_t condition, nNewValue;
  196. do{
  197. condition = mValue;
  198. nNewValue = condition + n;
  199. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  200. return nNewValue;
  201. }
  202. #elif defined(EA_COMPILER_GNUC) || defined (EA_COMPILER_CLANG)
  203. // Recent versions of GCC have atomic primitives built into the compiler and standard library.
  204. #if defined (EA_COMPILER_CLANG) || defined(EA_PLATFORM_APPLE) || (defined(EA_COMPILER_GNUC) && EA_COMPILER_VERSION >= 4003) // GCC 4.3 or later
  205. template <> inline
  206. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::GetValue() const
  207. { return __sync_add_and_fetch(const_cast<ValueType*>(&mValue), 0); }
  208. template <> inline
  209. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::GetValue() const
  210. { return __sync_add_and_fetch(const_cast<ValueType*>(&mValue), 0); }
  211. template <> inline
  212. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::SetValue(ValueType n)
  213. { __sync_synchronize(); return __sync_lock_test_and_set(&mValue, n); }
  214. template <> inline
  215. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::SetValue(ValueType n)
  216. { __sync_synchronize(); return __sync_lock_test_and_set(&mValue, n); }
  217. template <> inline
  218. bool AtomicInt<int32_t>::SetValueConditional(ValueType n, ValueType condition)
  219. { return (__sync_val_compare_and_swap(&mValue, condition, n) == condition); }
  220. template <> inline
  221. bool AtomicInt<uint32_t>::SetValueConditional(ValueType n, ValueType condition)
  222. { return (__sync_val_compare_and_swap(&mValue, condition, n) == condition); }
  223. template <> inline
  224. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Increment()
  225. { return __sync_add_and_fetch(&mValue, 1); }
  226. template <> inline
  227. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Increment()
  228. { return __sync_add_and_fetch(&mValue, 1); }
  229. template <> inline
  230. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Decrement()
  231. { return __sync_sub_and_fetch(&mValue, 1); }
  232. template <> inline
  233. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Decrement()
  234. { return __sync_sub_and_fetch(&mValue, 1); }
  235. template <> inline
  236. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Add(ValueType n)
  237. { return __sync_add_and_fetch(&mValue, n); }
  238. template <> inline
  239. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Add(ValueType n)
  240. { return __sync_add_and_fetch(&mValue, n); }
  241. template <> inline
  242. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::GetValue() const
  243. { return __sync_add_and_fetch(const_cast<ValueType*>(&mValue), 0); }
  244. template <> inline
  245. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::GetValue() const
  246. { return __sync_add_and_fetch(const_cast<ValueType*>(&mValue), 0); }
  247. template <> inline
  248. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::SetValue(ValueType n)
  249. { __sync_synchronize(); return __sync_lock_test_and_set(&mValue, n); }
  250. template <> inline
  251. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::SetValue(ValueType n)
  252. { __sync_synchronize(); return __sync_lock_test_and_set(&mValue, n); }
  253. template <> inline
  254. bool AtomicInt<int64_t>::SetValueConditional(ValueType n, ValueType condition)
  255. { return (__sync_val_compare_and_swap(&mValue, condition, n) == condition); }
  256. template <> inline
  257. bool AtomicInt<uint64_t>::SetValueConditional(ValueType n, ValueType condition)
  258. { return (__sync_val_compare_and_swap(&mValue, condition, n) == condition); }
  259. template <> inline
  260. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Increment()
  261. { return __sync_add_and_fetch(&mValue, 1); }
  262. template <> inline
  263. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Increment()
  264. { return __sync_add_and_fetch(&mValue, 1); }
  265. template <> inline
  266. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Decrement()
  267. { return __sync_sub_and_fetch(&mValue, 1); }
  268. template <> inline
  269. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Decrement()
  270. { return __sync_sub_and_fetch(&mValue, 1); }
  271. template <> inline
  272. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Add(ValueType n)
  273. { return __sync_add_and_fetch(&mValue, n); }
  274. template <> inline
  275. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Add(ValueType n)
  276. { return __sync_add_and_fetch(&mValue, n); }
  277. #else
  278. // If the above intrinsics aren't used...
  279. #ifndef InterlockedCompareExchangeImp
  280. namespace
  281. {
  282. int32_t InterlockedExchange(volatile int32_t* m, int32_t n)
  283. {
  284. int32_t result;
  285. __asm__ __volatile__ (
  286. "xchgl %%eax, (%2)" // The xchg instruction does an implicit lock instruction.
  287. : "=a" (result) // outputs
  288. : "a" (n), "q" (m) // inputs
  289. : "memory" // clobbered
  290. );
  291. return result;
  292. }
  293. int32_t InterlockedCompareExchange(volatile int32_t* m, int32_t n, int32_t condition)
  294. {
  295. int32_t result;
  296. __asm__ __volatile__(
  297. "lock; cmpxchgl %3, (%1) \n" // Test *m against EAX, if same, then *m = n
  298. : "=a" (result), "=q" (m) // outputs
  299. : "a" (condition), "q" (n), "1" (m) // inputs
  300. : "memory" // clobbered
  301. );
  302. return result;
  303. }
  304. #define InterlockedExchangeImp InterlockedExchange
  305. #define InterlockedCompareExchangeImp InterlockedCompareExchange
  306. }
  307. #endif
  308. // 32 bit versions
  309. template<> inline
  310. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::SetValue(ValueType n)
  311. { return (ValueType)InterlockedExchangeImp(&mValue, n); }
  312. template<> inline
  313. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::SetValue(ValueType n)
  314. { return (ValueType)InterlockedExchangeImp((int32_t*)&mValue, n); }
  315. template<> inline
  316. bool AtomicInt<int32_t>::SetValueConditional(ValueType n, ValueType condition)
  317. { return ((ValueType)InterlockedCompareExchangeImp(&mValue, n, condition) == condition); }
  318. template<> inline
  319. bool AtomicInt<uint32_t>::SetValueConditional(ValueType n, ValueType condition)
  320. { return ((ValueType)InterlockedCompareExchangeImp((int32_t*)&mValue, n, condition) == condition); }
  321. template<> inline
  322. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Increment()
  323. {
  324. int32_t result;
  325. __asm__ __volatile__ ("lock; xaddl %0, %1"
  326. : "=r" (result), "=m" (mValue)
  327. : "0" (1), "m" (mValue)
  328. : "memory"
  329. );
  330. return result + 1;
  331. }
  332. template<> inline
  333. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Increment()
  334. {
  335. int32_t result;
  336. __asm__ __volatile__ ("lock; xaddl %0, %1"
  337. : "=r" (result), "=m" (mValue)
  338. : "0" (1), "m" (mValue)
  339. : "memory"
  340. );
  341. return result + 1;
  342. }
  343. template<> inline
  344. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Decrement()
  345. {
  346. int32_t result;
  347. __asm__ __volatile__ ("lock; xaddl %0, %1"
  348. : "=r" (result), "=m" (mValue)
  349. : "0" (-1), "m" (mValue)
  350. : "memory"
  351. );
  352. return result - 1;
  353. }
  354. template<> inline
  355. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Decrement()
  356. {
  357. uint32_t result;
  358. __asm__ __volatile__ ("lock; xaddl %0, %1"
  359. : "=r" (result), "=m" (mValue)
  360. : "0" (-1), "m" (mValue)
  361. : "memory"
  362. );
  363. return result - 1;
  364. }
  365. template<> inline
  366. AtomicInt<int32_t>::ValueType AtomicInt<int32_t>::Add(ValueType n)
  367. {
  368. int32_t result;
  369. __asm__ __volatile__ ("lock; xaddl %0, %1"
  370. : "=r" (result), "=m" (mValue)
  371. : "0" (n), "m" (mValue)
  372. : "memory"
  373. );
  374. return result + n;
  375. }
  376. template<> inline
  377. AtomicInt<uint32_t>::ValueType AtomicInt<uint32_t>::Add(ValueType n)
  378. {
  379. uint32_t result;
  380. __asm__ __volatile__ ("lock; xaddl %0, %1"
  381. : "=r" (result), "=m" (mValue)
  382. : "0" (n), "m" (mValue)
  383. : "memory"
  384. );
  385. return result + n;
  386. }
  387. // 64 bit versions
  388. inline bool
  389. InterlockedSetIfEqual(volatile int64_t* dest, int64_t newValue, int64_t condition)
  390. {
  391. int64_t oldValue;
  392. __asm __volatile ("lock; cmpxchg8b %1"
  393. : "=A" (oldValue), "=m" (*dest)
  394. : "b" (((int32_t) newValue) & 0xffffffff),
  395. "c" ((int32_t)(newValue >> 32)),
  396. "m" (*dest), "a" (((int32_t) condition) & 0xffffffff),
  397. "d" ((int32_t)(condition >> 32)));
  398. return oldValue == condition;
  399. // Reference non-thread-safe implementation:
  400. // if(*dest == condition)
  401. // {
  402. // *dest = newValue
  403. // return true;
  404. // }
  405. // return false;
  406. }
  407. inline bool
  408. InterlockedSetIfEqual(volatile uint64_t* dest, uint64_t newValue, uint64_t condition)
  409. {
  410. uint64_t oldValue;
  411. __asm __volatile ("lock; cmpxchg8b %1"
  412. : "=A" (oldValue), "=m" (*dest)
  413. : "b" (((uint32_t) newValue) & 0xffffffff),
  414. "c" ((uint32_t)(newValue >> 32)),
  415. "m" (*dest), "a" (((uint32_t) condition) & 0xffffffff),
  416. "d" ((uint32_t)(condition >> 32)));
  417. return oldValue == condition;
  418. // Reference non-thread-safe implementation:
  419. // if(*dest == condition)
  420. // {
  421. // *dest = newValue
  422. // return true;
  423. // }
  424. // return false;
  425. }
  426. template<> inline
  427. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::GetValue() const{
  428. int64_t condition, nNewValue;
  429. do{
  430. nNewValue = condition = mValue; // Todo: This function has a problem unless the assignment of mValue to condition is atomic.
  431. } while(!InterlockedSetIfEqual(const_cast<int64_t*>(&mValue), nNewValue, condition));
  432. return nNewValue;
  433. }
  434. template<> inline
  435. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::GetValue() const{
  436. uint64_t condition, nNewValue;
  437. do{
  438. nNewValue = condition = mValue; // Todo: This function has a problem unless the assignment of mValue to condition is atomic.
  439. } while(!InterlockedSetIfEqual(const_cast<uint64_t*>(&mValue), nNewValue, condition));
  440. return nNewValue;
  441. }
  442. template<> inline
  443. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::SetValue(ValueType n){
  444. int64_t condition;
  445. do{
  446. condition = mValue;
  447. } while(!InterlockedSetIfEqual(&mValue, n, condition));
  448. return condition;
  449. }
  450. template<> inline
  451. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::SetValue(ValueType n){
  452. uint64_t condition;
  453. do{
  454. condition = mValue;
  455. } while(!InterlockedSetIfEqual(&mValue, n, condition));
  456. return condition;
  457. }
  458. template<> inline
  459. bool AtomicInt<int64_t>::SetValueConditional(ValueType n, ValueType condition){
  460. return InterlockedSetIfEqual(&mValue, n, condition);
  461. }
  462. template<> inline
  463. bool AtomicInt<uint64_t>::SetValueConditional(ValueType n, ValueType condition){
  464. return InterlockedSetIfEqual(&mValue, n, condition);
  465. }
  466. template<> inline
  467. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Increment(){
  468. int64_t condition, nNewValue;
  469. do{
  470. condition = mValue;
  471. nNewValue = condition + 1;
  472. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  473. return nNewValue;
  474. }
  475. template<> inline
  476. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Increment(){
  477. uint64_t condition, nNewValue;
  478. do{
  479. condition = mValue;
  480. nNewValue = condition + 1;
  481. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  482. return nNewValue;
  483. }
  484. template<> inline
  485. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Decrement(){
  486. int64_t condition, nNewValue;
  487. do{
  488. condition = mValue;
  489. nNewValue = condition - 1;
  490. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  491. return nNewValue;
  492. }
  493. template<> inline
  494. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Decrement(){
  495. uint64_t condition, nNewValue;
  496. do{
  497. condition = mValue;
  498. nNewValue = condition - 1;
  499. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  500. return nNewValue;
  501. }
  502. template<> inline
  503. AtomicInt<int64_t>::ValueType AtomicInt<int64_t>::Add(ValueType n){
  504. int64_t condition, nNewValue;
  505. do{
  506. condition = mValue;
  507. nNewValue = condition + n;
  508. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  509. return nNewValue;
  510. }
  511. template<> inline
  512. AtomicInt<uint64_t>::ValueType AtomicInt<uint64_t>::Add(ValueType n){
  513. uint64_t condition, nNewValue;
  514. do{
  515. condition = mValue;
  516. nNewValue = condition + n;
  517. } while(!InterlockedSetIfEqual(&mValue, nNewValue, condition));
  518. return nNewValue;
  519. }
  520. #endif
#elif defined(EA_COMPILER_INTEL) || defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_BORLAND)

// Note: this won't compile when ValueType is 64 bits.
  523. template<class T> inline
  524. typename AtomicInt<T>::ValueType AtomicInt<T>::SetValue(ValueType n)
  525. {
  526. __asm{
  527. mov ecx, this // mValue is expected to be at offset zero of this.
  528. mov eax, n
  529. xchg eax, dword ptr [ecx] // The xchg instruction does an implicit lock instruction.
  530. }
  531. }
  532. template<class T> inline
  533. bool AtomicInt<T>::SetValueConditional(ValueType n, ValueType condition)
  534. {
  535. __asm{
  536. mov ecx, this // mValue is expected to be at offset zero of this.
  537. mov edx, n
  538. mov eax, condition
  539. lock cmpxchg dword ptr [ecx], edx // Compares mValue to condition. If equal, z flag is set and n is copied into mValue.
  540. jz condition_met
  541. xor eax, eax
  542. jmp end
  543. condition_met:
  544. mov eax, 1
  545. end:
  546. }
  547. }
  548. template<class T> inline
  549. bool typename AtomicInt<T>::ValueType AtomicInt<T>::Increment()
  550. {
  551. __asm{
  552. mov ecx, this // mValue is expected to be at offset zero of this.
  553. mov eax, 1
  554. lock xadd dword ptr [ecx], eax // Sum goes into [ecx], old mValue goes into eax.
  555. inc eax // Increment eax because the return value is the new value.
  556. }
  557. }
  558. template<class T> inline
  559. bool typename AtomicInt<T>::ValueType AtomicInt<T>::Decrement()
  560. {
  561. __asm{
  562. mov ecx, this // mValue is expected to be at offset zero of this.
  563. mov eax, 0xffffffff
  564. lock xadd dword ptr [ecx], eax // Sum goes into [ecx], old mValue goes into eax.
  565. dec eax // Increment eax because the return value is the new value.
  566. }
  567. }
  568. template<class T> inline
  569. bool typename AtomicInt<T>::ValueType AtomicInt<T>::Add(ValueType n)
  570. {
  571. __asm{
  572. mov ecx, this // mValue is expected to be at offset zero of this.
  573. mov eax, n
  574. lock xadd dword ptr [ecx], eax // Sum goes into [ecx], old mValue goes into eax.
  575. add eax, n
  576. }
  577. }
#else
	// Compiler not currently supported.
#endif

} // namespace Thread
} // namespace EA

#endif // EA_PROCESSOR_X86 || EA_PROCESSOR_ARM

EA_RESTORE_VC_WARNING()

#endif // EATHREAD_X86_EATHREAD_ATOMIC_X86_H