/*-------------------------------------------------------------------------
 *
 * generic.h
 *	  Implement higher level operations based on some lower level atomic
 *	  operations.
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics/generic.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
# error "should be included via atomics.h"
#endif

/*
 * If read or write barriers are undefined, we upgrade them to full memory
 * barriers.
 */
#if !defined(pg_read_barrier_impl)
# define pg_read_barrier_impl pg_memory_barrier_impl
#endif
#if !defined(pg_write_barrier_impl)
# define pg_write_barrier_impl pg_memory_barrier_impl
#endif

#ifndef PG_HAVE_SPIN_DELAY
#define PG_HAVE_SPIN_DELAY
#define pg_spin_delay_impl() ((void)0)
#endif

/* provide fallback */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && defined(PG_HAVE_ATOMIC_U32_SUPPORT)
#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef pg_atomic_uint32 pg_atomic_flag;
#endif

#ifndef PG_HAVE_ATOMIC_READ_U32
#define PG_HAVE_ATOMIC_READ_U32
static inline uint32
pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
{
	return ptr->value;
}
#endif

#ifndef PG_HAVE_ATOMIC_WRITE_U32
#define PG_HAVE_ATOMIC_WRITE_U32
static inline void
pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	ptr->value = val;
}
#endif

#ifndef PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
#define PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
static inline void
pg_atomic_unlocked_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	ptr->value = val;
}
#endif

/*
 * provide fallback for test_and_set using atomic_exchange if available
 */
#if !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_EXCHANGE_U32)

#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_write_u32_impl(ptr, 0);
}

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_exchange_u32_impl(ptr, 1) == 0;
}

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_read_u32_impl(ptr) == 0;
}

#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* XXX: release semantics suffice? */
	pg_memory_barrier_impl();
	pg_atomic_write_u32_impl(ptr, 0);
}

/*
 * provide fallback for test_and_set using atomic_compare_exchange if
 * available.
 */
#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)

#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_write_u32_impl(ptr, 0);
}

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	uint32 value = 0;

	return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
}

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_read_u32_impl(ptr) == 0;
}

#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	/*
	 * Use a memory barrier + plain write if we have a native memory
	 * barrier. But don't do so if memory barriers use spinlocks - that'd lead
	 * to circularity if flags are used to implement spinlocks.
	 */
#ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
	/* XXX: release semantics suffice? */
	pg_memory_barrier_impl();
	pg_atomic_write_u32_impl(ptr, 0);
#else
	uint32 value = 1;

	pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
#endif
}

#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
# error "No pg_atomic_test_and_set provided"
#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
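
/*
 * Illustrative sketch, not part of the original header: the flag API above
 * is a plain test-and-set primitive, so a minimal busy-wait lock built on
 * top of it looks roughly like the following.  The names
 * "example_flag_acquire" / "example_flag_release" are hypothetical and only
 * show how test_set and clear pair up; real callers go through the
 * pg_atomic_*_flag() wrappers defined in atomics.h.
 */
static inline void
example_flag_acquire(volatile pg_atomic_flag *lock)
{
	/* test_set returns true exactly when the flag was previously clear */
	while (!pg_atomic_test_set_flag_impl(lock))
		pg_spin_delay_impl();
}

static inline void
example_flag_release(volatile pg_atomic_flag *lock)
{
	/* the fallback clear_flag above issues a barrier before the store */
	pg_atomic_clear_flag_impl(lock);
}
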
#ifndef PG_HAVE_ATOMIC_INIT_U32
#define PG_HAVE_ATOMIC_INIT_U32
static inline void
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
{
	ptr->value = val_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_EXCHANGE_U32
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
{
	uint32 old;

	old = ptr->value; /* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
		/* skip */ ;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	uint32 old;

	old = ptr->value; /* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
		/* skip */ ;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_SUB_U32
static inline uint32
pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return pg_atomic_fetch_add_u32_impl(ptr, -sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_AND_U32
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	uint32 old;

	old = ptr->value; /* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
		/* skip */ ;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_OR_U32
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	uint32 old;

	old = ptr->value; /* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
		/* skip */ ;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U32)
#define PG_HAVE_ATOMIC_ADD_FETCH_U32
static inline uint32
pg_atomic_add_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	return pg_atomic_fetch_add_u32_impl(ptr, add_) + add_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U32)
#define PG_HAVE_ATOMIC_SUB_FETCH_U32
static inline uint32
pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return pg_atomic_fetch_sub_u32_impl(ptr, sub_) - sub_;
}
#endif
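
/*
 * Illustrative sketch, not part of the original header: the fetch_add
 * fallback above returns the value as it was *before* the addition, while
 * add_fetch returns the value *after* it.  Assuming the platform provides
 * either a native fetch_add or compare_exchange (all supported ports do),
 * a hypothetical ticket dispenser ("example_next_ticket" is not a real
 * PostgreSQL function) would therefore use fetch_add so that the first
 * caller after initializing the counter to 0 gets ticket 0.
 */
static inline uint32
example_next_ticket(volatile pg_atomic_uint32 *counter)
{
	/* returns 0, 1, 2, ... across concurrent callers, each value exactly once */
	return pg_atomic_fetch_add_u32_impl(counter, 1);
}
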
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
{
	uint64 old;

	old = ptr->value; /* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
		/* skip */ ;
	return old;
}
#endif

#ifndef PG_HAVE_ATOMIC_WRITE_U64
#define PG_HAVE_ATOMIC_WRITE_U64

#if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
	!defined(PG_HAVE_ATOMIC_U64_SIMULATION)

static inline void
pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/*
	 * On this platform aligned 64bit writes are guaranteed to be atomic,
	 * except if using the fallback implementation, where we can't guarantee
	 * the required alignment.
	 */
	AssertPointerAlignment(ptr, 8);
	ptr->value = val;
}

#else

static inline void
pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/*
	 * 64 bit writes aren't safe on all platforms. In the generic
	 * implementation, implement them as an atomic exchange.
	 */
	pg_atomic_exchange_u64_impl(ptr, val);
}

#endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
#endif /* PG_HAVE_ATOMIC_WRITE_U64 */

#ifndef PG_HAVE_ATOMIC_READ_U64
#define PG_HAVE_ATOMIC_READ_U64

#if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
	!defined(PG_HAVE_ATOMIC_U64_SIMULATION)

static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
	/*
	 * On this platform aligned 64-bit reads are guaranteed to be atomic.
	 */
	AssertPointerAlignment(ptr, 8);
	return ptr->value;
}

#else

static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
	uint64 old = 0;

	/*
	 * 64-bit reads aren't atomic on all platforms. In the generic
	 * implementation, implement them as a compare/exchange with 0. That'll
	 * fail or succeed, but always return the old value. Possibly it might
	 * store a 0, but only if the previous value also was a 0 - i.e. harmless.
	 */
	pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);

	return old;
}

#endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
#endif /* PG_HAVE_ATOMIC_READ_U64 */
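
/*
 * Illustrative sketch, not part of the original header: whichever branch of
 * pg_atomic_read_u64_impl above gets compiled, a reader never sees a torn
 * value - either the platform guarantees 8-byte single-copy atomicity, or
 * the read is done via compare/exchange.  "example_poll_counter" is a
 * hypothetical name showing that polling a shared 64-bit counter stays safe
 * on both kinds of platforms.
 */
static inline bool
example_poll_counter(volatile pg_atomic_uint64 *counter, uint64 threshold)
{
	/* compare an untorn snapshot against a caller-supplied threshold */
	return pg_atomic_read_u64_impl(counter) >= threshold;
}
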
#ifndef PG_HAVE_ATOMIC_INIT_U64
#define PG_HAVE_ATOMIC_INIT_U64
static inline void
pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
{
	ptr->value = val_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	uint64 old;

	old = ptr->value; /* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
		/* skip */ ;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_SUB_U64
static inline uint64
pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return pg_atomic_fetch_add_u64_impl(ptr, -sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_AND_U64
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
	uint64 old;

	old = ptr->value; /* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
		/* skip */ ;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_OR_U64
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
	uint64 old;

	old = ptr->value; /* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
		/* skip */ ;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U64)
#define PG_HAVE_ATOMIC_ADD_FETCH_U64
static inline uint64
pg_atomic_add_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	return pg_atomic_fetch_add_u64_impl(ptr, add_) + add_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U64)
#define PG_HAVE_ATOMIC_SUB_FETCH_U64
static inline uint64
pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return pg_atomic_fetch_sub_u64_impl(ptr, sub_) - sub_;
}
#endif