  1. /*-------------------------------------------------------------------------
  2. *
  3. * fallback.h
  4. * Fallback for platforms without spinlock and/or atomics support. Slower
  5. * than native atomics support, but not unusably slow.
  6. *
  7. * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
  8. * Portions Copyright (c) 1994, Regents of the University of California
  9. *
  10. * src/include/port/atomics/fallback.h
  11. *
  12. *-------------------------------------------------------------------------
  13. */
/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#	error "should be included via atomics.h"
#endif

#ifndef pg_memory_barrier_impl
/*
 * If we have no memory barrier implementation for this architecture, we
 * fall back to acquiring and releasing a spinlock.  This might, in turn,
 * fall back to the semaphore-based spinlock implementation, which will be
 * amazingly slow.
 *
 * It's not self-evident that every possible legal implementation of a
 * spinlock acquire-and-release would be equivalent to a full memory barrier.
 * For example, I'm not sure that Itanium's acq and rel add up to a full
 * fence.  But all of our actual implementations seem OK in this regard.
 */
#define PG_HAVE_MEMORY_BARRIER_EMULATION

/* NOTE(review): defined out-of-line in a .c file elsewhere — confirm location */
extern void pg_spinlock_barrier(void);
#define pg_memory_barrier_impl pg_spinlock_barrier
#endif
#ifndef pg_compiler_barrier_impl
/*
 * If the compiler/arch combination does not provide compiler barriers,
 * provide a fallback.  The fallback simply consists of a function call into
 * an externally defined function.  That should guarantee compiler barrier
 * semantics except for compilers that do inter translation unit/global
 * optimization - those better provide an actual compiler barrier.
 *
 * A native compiler barrier for sure is a lot faster than this...
 */
#define PG_HAVE_COMPILER_BARRIER_EMULATION

/*
 * The barrier effect relies solely on this being an out-of-line call the
 * compiler cannot see through; the function body can be empty.
 * NOTE(review): definition presumably lives in a .c file elsewhere — confirm.
 */
extern void pg_extern_compiler_barrier(void);
#define pg_compiler_barrier_impl pg_extern_compiler_barrier
#endif
/*
 * If we have no atomics implementation for this platform, fall back to
 * providing the atomics API using a spinlock to protect the internal state.
 * Possibly the spinlock implementation uses semaphores internally...
 *
 * We have to be a bit careful here, as it's not guaranteed that atomic
 * variables are mapped to the same address in every process (e.g. dynamic
 * shared memory segments).  We can't just hash the address and use that to
 * map to a spinlock.  Instead assign a spinlock on initialization of the
 * atomic variable.
 */
/*
 * Only simulate the flag if there is no native flag support AND no native
 * u32 support (a u32 CAS can otherwise be used to implement the flag).
 */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && !defined(PG_HAVE_ATOMIC_U32_SUPPORT)

#define PG_HAVE_ATOMIC_FLAG_SIMULATION
#define PG_HAVE_ATOMIC_FLAG_SUPPORT

/* Spinlock-protected simulation of an atomic flag. */
typedef struct pg_atomic_flag
{
	/*
	 * To avoid circular includes we can't use s_lock as a type here. Instead
	 * just reserve enough space for all spinlock types. Some platforms would
	 * be content with just one byte instead of 4, but that's not too much
	 * waste.
	 */
#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
	int			sema[4];
#else
	int			sema;
#endif
	volatile bool value;
} pg_atomic_flag;

#endif							/* PG_HAVE_ATOMIC_FLAG_SUPPORT */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)

#define PG_HAVE_ATOMIC_U32_SIMULATION
#define PG_HAVE_ATOMIC_U32_SUPPORT

/* Spinlock-protected simulation of an atomic uint32. */
typedef struct pg_atomic_uint32
{
	/* Check pg_atomic_flag's definition above for an explanation */
#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
	int			sema[4];
#else
	int			sema;
#endif
	volatile uint32 value;
} pg_atomic_uint32;

#endif							/* PG_HAVE_ATOMIC_U32_SUPPORT */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT)

#define PG_HAVE_ATOMIC_U64_SIMULATION
#define PG_HAVE_ATOMIC_U64_SUPPORT

/* Spinlock-protected simulation of an atomic uint64. */
typedef struct pg_atomic_uint64
{
	/* Check pg_atomic_flag's definition above for an explanation */
#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
	int			sema[4];
#else
	int			sema;
#endif
	volatile uint64 value;
} pg_atomic_uint64;

#endif							/* PG_HAVE_ATOMIC_U64_SUPPORT */
#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION

/*
 * Prototypes for the out-of-line, spinlock-based emulation of the flag API.
 * Each PG_HAVE_* define advertises the corresponding operation to atomics.h.
 */
#define PG_HAVE_ATOMIC_INIT_FLAG
extern void pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
extern bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_CLEAR_FLAG
extern void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
extern bool pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr);

#endif							/* PG_HAVE_ATOMIC_FLAG_SIMULATION */
#ifdef PG_HAVE_ATOMIC_U32_SIMULATION

/*
 * Prototypes for the out-of-line, spinlock-based emulation of the u32 API.
 * Operations not declared here are synthesized by atomics.h from these
 * primitives (e.g. via compare-exchange loops).
 */
#define PG_HAVE_ATOMIC_INIT_U32
extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_);

#define PG_HAVE_ATOMIC_WRITE_U32
extern void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val);

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
												uint32 *expected, uint32 newval);

#define PG_HAVE_ATOMIC_FETCH_ADD_U32
extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);

#endif							/* PG_HAVE_ATOMIC_U32_SIMULATION */
#ifdef PG_HAVE_ATOMIC_U64_SIMULATION

/*
 * Prototypes for the out-of-line, spinlock-based emulation of the u64 API.
 * Note there is no simulated write/read here; atomics.h derives the rest
 * from compare-exchange.
 */
#define PG_HAVE_ATOMIC_INIT_U64
extern void pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_);

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
extern bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
												uint64 *expected, uint64 newval);

#define PG_HAVE_ATOMIC_FETCH_ADD_U64
extern uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_);

#endif							/* PG_HAVE_ATOMIC_U64_SIMULATION */