
/*-------------------------------------------------------------------------
 *
 * generic-gcc.h
 *	  Atomic operations, implemented using gcc (or compatible) intrinsics.
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES:
 *
 * Documentation:
 * * Legacy __sync Built-in Functions for Atomic Memory Access
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
 * * Built-in functions for memory model aware atomic operations
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
 *
 * src/include/port/atomics/generic-gcc.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#error "should be included via atomics.h"
#endif

/*
 * An empty asm block should be a sufficient compiler barrier.
 */
#define pg_compiler_barrier_impl()	__asm__ __volatile__("" ::: "memory")
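
/*
 * Illustration (not part of this header): a compiler barrier only prevents
 * the compiler from reordering or caching memory accesses across it; it
 * emits no CPU fence instruction.  E.g., in a busy-wait loop such as
 *
 *		while (!flag)
 *			pg_compiler_barrier();
 *
 * the barrier forces "flag" to be re-read from memory on each iteration,
 * but gives no ordering guarantee with respect to other processors.
 */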

/*
 * If we're on GCC 4.1.0 or higher, we should be able to get a memory barrier
 * out of this compiler built-in. But we prefer to rely on platform specific
 * definitions where possible, and use this only as a fallback.
 */
#if !defined(pg_memory_barrier_impl)
#	if defined(HAVE_GCC__ATOMIC_INT32_CAS)
#		define pg_memory_barrier_impl()		__atomic_thread_fence(__ATOMIC_SEQ_CST)
#	elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#		define pg_memory_barrier_impl()		__sync_synchronize()
#	endif
#endif /* !defined(pg_memory_barrier_impl) */

#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
#	define pg_read_barrier_impl()		__atomic_thread_fence(__ATOMIC_ACQUIRE)
#endif

#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
#	define pg_write_barrier_impl()		__atomic_thread_fence(__ATOMIC_RELEASE)
#endif
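
/*
 * Usage sketch (not part of this header): read and write barriers are meant
 * to be used in pairs.  A writer publishes data before setting a flag:
 *
 *		data = 42;
 *		pg_write_barrier();
 *		flag = true;
 *
 * while the reader checks the flag before consuming the data:
 *
 *		if (flag)
 *		{
 *			pg_read_barrier();
 *			use(data);
 *		}
 *
 * pg_memory_barrier() orders both loads and stores and is strictly stronger
 * than either of these.
 */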

#ifdef HAVE_ATOMICS

/* generic gcc based atomic flag implementation */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) \
	&& (defined(HAVE_GCC__SYNC_INT32_TAS) || defined(HAVE_GCC__SYNC_CHAR_TAS))

#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef struct pg_atomic_flag
{
	/*
	 * If we have a choice, use int-width TAS, because that is more efficient
	 * and/or more reliably implemented on most non-Intel platforms.  (Note
	 * that this code isn't used on x86[_64]; see arch-x86.h for that.)
	 */
#ifdef HAVE_GCC__SYNC_INT32_TAS
	volatile int value;
#else
	volatile char value;
#endif
} pg_atomic_flag;

#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */

/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
	&& (defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS))

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
	volatile uint32 value;
} pg_atomic_uint32;

#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */

/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
	&& !defined(PG_DISABLE_64_BIT_ATOMICS) \
	&& (defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS))

#define PG_HAVE_ATOMIC_U64_SUPPORT
typedef struct pg_atomic_uint64
{
	volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;

#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
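
/*
 * Note: the pg_attribute_aligned(8) above is needed because 64-bit atomic
 * instructions on several platforms require the operand to be naturally
 * (8-byte) aligned; without the attribute, a uint64 member may get only
 * 4-byte alignment on some 32-bit ABIs (e.g. i386).
 */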

#ifdef PG_HAVE_ATOMIC_FLAG_SUPPORT

#if defined(HAVE_GCC__SYNC_CHAR_TAS) || defined(HAVE_GCC__SYNC_INT32_TAS)

#ifndef PG_HAVE_ATOMIC_TEST_SET_FLAG
#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* NB: only an acquire barrier, not a full one */
	/* some platforms only support a 1 here */
	return __sync_lock_test_and_set(&ptr->value, 1) == 0;
}
#endif

#endif /* defined(HAVE_GCC__SYNC_*_TAS) */

#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return ptr->value == 0;
}
#endif

#ifndef PG_HAVE_ATOMIC_CLEAR_FLAG
#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	__sync_lock_release(&ptr->value);
}
#endif

#ifndef PG_HAVE_ATOMIC_INIT_FLAG
#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_clear_flag_impl(ptr);
}
#endif

#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
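
/*
 * Usage sketch (not part of this header): these _impl functions back the
 * public pg_atomic_flag API in atomics.h, which suffices for a minimal
 * spinlock:
 *
 *		static pg_atomic_flag lock;
 *
 *		pg_atomic_init_flag(&lock);
 *
 *		while (!pg_atomic_test_set_flag(&lock))
 *			;
 *		... critical section ...
 *		pg_atomic_clear_flag(&lock);
 *
 * The while loop spins until the flag is won.  Test-and-set provides acquire
 * semantics and clear provides release semantics, which is what lock
 * acquisition and release require.
 */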

/* prefer __atomic, it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	/* FIXME: we can probably use a lower consistency model */
	return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	bool		ret;
	uint32		current;

	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
	ret = current == *expected;
	*expected = current;
	return ret;
}
#endif
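
/*
 * Usage sketch (not part of this header): both variants above implement the
 * same contract: on failure, *expected is overwritten with the value that
 * was actually found, so callers typically retry in a loop.  For example,
 * storing a new maximum via the public API in atomics.h:
 *
 *		uint32		old = pg_atomic_read_u32(&var);
 *
 *		while (old < newmax)
 *		{
 *			if (pg_atomic_compare_exchange_u32(&var, &old, newmax))
 *				break;
 *		}
 *
 * On each failed exchange, "old" is refreshed with the current value, so the
 * loop condition re-checks whether an update is still needed.
 */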

/* if we have 32-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U32
static inline uint32
pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U32
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U32
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	return __sync_fetch_and_or(&ptr->value, or_);
}
#endif
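
/*
 * Usage sketch (not part of this header): all fetch_* operations return the
 * value held *before* the update, matching the __sync_fetch_and_* builtins
 * they wrap.  That makes, e.g., a shared ticket counter straightforward via
 * the public API in atomics.h:
 *
 *		uint32		my_ticket = pg_atomic_fetch_add_u32(&next_ticket, 1);
 *
 * Concurrent callers are guaranteed to receive distinct tickets.
 */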

#if !defined(PG_DISABLE_64_BIT_ATOMICS)

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	bool		ret;
	uint64		current;

	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
	ret = current == *expected;
	*expected = current;
	return ret;
}
#endif

/* if we have 64-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U64
static inline uint64
pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U64
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
	return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U64
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
	return __sync_fetch_and_or(&ptr->value, or_);
}
#endif

#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */

#endif /* defined(HAVE_ATOMICS) */