/*************************************************************************/
/*  safe_refcount.cpp                                                    */
/*************************************************************************/
/*                       This file is part of:                           */
/*                           GODOT ENGINE                                */
/*                    http://www.godotengine.org                         */
/*************************************************************************/
/* Copyright (c) 2007-2017 Juan Linietsky, Ariel Manzur.                 */
/* Copyright (c) 2014-2017 Godot Engine contributors (cf. AUTHORS.md)    */
/*                                                                       */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the       */
/* "Software"), to deal in the Software without restriction, including   */
/* without limitation the rights to use, copy, modify, merge, publish,   */
/* distribute, sublicense, and/or sell copies of the Software, and to    */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions:                                             */
/*                                                                       */
/* The above copyright notice and this permission notice shall be        */
/* included in all copies or substantial portions of the Software.       */
/*                                                                       */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,       */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF    */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY  */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,  */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE     */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                */
/*************************************************************************/
  30. #include "safe_refcount.h"
  31. // Atomic functions, these are used for multithread safe reference counters!
  32. #ifdef NO_THREADS
  33. /* Bogus implementation unaware of multiprocessing */
  34. template <class T>
  35. static _ALWAYS_INLINE_ T _atomic_conditional_increment_impl(register T *pw) {
  36. if (*pw == 0)
  37. return 0;
  38. (*pw)++;
  39. return *pw;
  40. }
  41. template <class T>
  42. static _ALWAYS_INLINE_ T _atomic_decrement_impl(register T *pw) {
  43. (*pw)--;
  44. return *pw;
  45. }
  46. template <class T>
  47. static _ALWAYS_INLINE_ T _atomic_increment_impl(register T *pw) {
  48. (*pw)++;
  49. return *pw;
  50. }
  51. template <class T>
  52. static _ALWAYS_INLINE_ T _atomic_sub_impl(register T *pw, register T val) {
  53. (*pw) -= val;
  54. return *pw;
  55. }
  56. template <class T>
  57. static _ALWAYS_INLINE_ T _atomic_add_impl(register T *pw, register T val) {
  58. (*pw) += val;
  59. return *pw;
  60. }
  61. #elif defined(__GNUC__)
  62. /* Implementation for GCC & Clang */
  63. // GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
  64. // Clang states it supports GCC atomic builtins.
  65. template <class T>
  66. static _ALWAYS_INLINE_ T _atomic_conditional_increment_impl(register T *pw) {
  67. while (true) {
  68. T tmp = static_cast<T const volatile &>(*pw);
  69. if (tmp == 0)
  70. return 0; // if zero, can't add to it anymore
  71. if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
  72. return tmp + 1;
  73. }
  74. }
  75. template <class T>
  76. static _ALWAYS_INLINE_ T _atomic_decrement_impl(register T *pw) {
  77. return __sync_sub_and_fetch(pw, 1);
  78. }
  79. template <class T>
  80. static _ALWAYS_INLINE_ T _atomic_increment_impl(register T *pw) {
  81. return __sync_add_and_fetch(pw, 1);
  82. }
  83. template <class T>
  84. static _ALWAYS_INLINE_ T _atomic_sub_impl(register T *pw, register T val) {
  85. return __sync_sub_and_fetch(pw, val);
  86. }
  87. template <class T>
  88. static _ALWAYS_INLINE_ T _atomic_add_impl(register T *pw, register T val) {
  89. return __sync_add_and_fetch(pw, val);
  90. }
  91. #elif defined(_MSC_VER)
  92. /* Implementation for MSVC-Windows */
  93. // don't pollute my namespace!
  94. #include <windows.h>
  95. #define ATOMIC_CONDITIONAL_INCREMENT_BODY(m_pw, m_win_type, m_win_cmpxchg, m_cpp_type) \
  96. /* try to increment until it actually works */ \
  97. /* taken from boost */ \
  98. while (true) { \
  99. m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw)); \
  100. if (tmp == 0) \
  101. return 0; /* if zero, can't add to it anymore */ \
  102. if (m_win_cmpxchg((m_win_type volatile *)(m_pw), tmp + 1, tmp) == tmp) \
  103. return tmp + 1; \
  104. }
  105. static _ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(register uint32_t *pw) {
  106. ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t)
  107. }
  108. static _ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(register uint32_t *pw) {
  109. return InterlockedDecrement((LONG volatile *)pw);
  110. }
  111. static _ALWAYS_INLINE_ uint32_t _atomic_increment_impl(register uint32_t *pw) {
  112. return InterlockedIncrement((LONG volatile *)pw);
  113. }
  114. static _ALWAYS_INLINE_ uint32_t _atomic_sub_impl(register uint32_t *pw, register uint32_t val) {
  115. #if _WIN32_WINNT >= 0x0601 // Windows 7+
  116. return InterlockedExchangeSubtract(pw, val) - val;
  117. #else
  118. return InterlockedExchangeAdd((LONG volatile *)pw, -(int32_t)val) - val;
  119. #endif
  120. }
  121. static _ALWAYS_INLINE_ uint32_t _atomic_add_impl(register uint32_t *pw, register uint32_t val) {
  122. return InterlockedAdd((LONG volatile *)pw, val);
  123. }
  124. static _ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(register uint64_t *pw) {
  125. ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t)
  126. }
  127. static _ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(register uint64_t *pw) {
  128. return InterlockedDecrement64((LONGLONG volatile *)pw);
  129. }
  130. static _ALWAYS_INLINE_ uint64_t _atomic_increment_impl(register uint64_t *pw) {
  131. return InterlockedIncrement64((LONGLONG volatile *)pw);
  132. }
  133. static _ALWAYS_INLINE_ uint64_t _atomic_sub_impl(register uint64_t *pw, register uint64_t val) {
  134. #if _WIN32_WINNT >= 0x0601 // Windows 7+
  135. return InterlockedExchangeSubtract64(pw, val) - val;
  136. #else
  137. return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val;
  138. #endif
  139. }
  140. static _ALWAYS_INLINE_ uint64_t _atomic_add_impl(register uint64_t *pw, register uint64_t val) {
  141. return InterlockedAdd64((LONGLONG volatile *)pw, val);
  142. }
  143. #else
  144. //no threads supported?
  145. #error Must provide atomic functions for this platform or compiler!
  146. #endif
  147. // The actual advertised functions; they'll call the right implementation
  148. uint32_t atomic_conditional_increment(register uint32_t *counter) {
  149. return _atomic_conditional_increment_impl(counter);
  150. }
  151. uint32_t atomic_decrement(register uint32_t *pw) {
  152. return _atomic_decrement_impl(pw);
  153. }
  154. uint32_t atomic_increment(register uint32_t *pw) {
  155. return _atomic_increment_impl(pw);
  156. }
  157. uint32_t atomic_sub(register uint32_t *pw, register uint32_t val) {
  158. return _atomic_sub_impl(pw, val);
  159. }
  160. uint32_t atomic_add(register uint32_t *pw, register uint32_t val) {
  161. return _atomic_add_impl(pw, val);
  162. }
  163. uint64_t atomic_conditional_increment(register uint64_t *counter) {
  164. return _atomic_conditional_increment_impl(counter);
  165. }
  166. uint64_t atomic_decrement(register uint64_t *pw) {
  167. return _atomic_decrement_impl(pw);
  168. }
  169. uint64_t atomic_increment(register uint64_t *pw) {
  170. return _atomic_increment_impl(pw);
  171. }
  172. uint64_t atomic_sub(register uint64_t *pw, register uint64_t val) {
  173. return _atomic_sub_impl(pw, val);
  174. }
  175. uint64_t atomic_add(register uint64_t *pw, register uint64_t val) {
  176. return _atomic_add_impl(pw, val);
  177. }