atomic_unknown.h

/*
 * Copyright (C) 2006 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @file
 * @brief Atomic operations and memory barriers implemented using locks
 *
 * Atomic operations and memory barriers implemented using locks
 * (for architectures not yet supported via inline assembler).
 *
 * \warning atomic ops do not include memory barriers, see atomic_ops.h
 * for more details
 *
 * Config defines:
 * - NOSMP (membars are null in this case)
 * - HAVE_ASM_INLINE_MEMBAR (membars already defined => use them)
 * - HAVE_ASM_INLINE_ATOMIC_OPS (atomic ops already defined => don't
 *   redefine them)
 * @ingroup atomic
 */
/*
 * History:
 * --------
 *  2006-03-08  created by andrei
 *  2007-05-11  added atomic_add and atomic_cmpxchg;
 *              use lock_set if lock economy is not needed (andrei)
 *  2007-05-29  added membar_depends(), membar_*_atomic_op and
 *              membar_*_atomic_setget (andrei)
 */
#ifndef _atomic_unknown_h
#define _atomic_unknown_h

#include "../lock_ops.h"

#ifndef HAVE_ASM_INLINE_MEMBAR

#ifdef NOSMP
#define membar() do {} while(0)
#else /* SMP */

#warning no native memory barrier implementations, falling back to slow lock \
	based workaround

#define MEMBAR_USES_LOCK

extern gen_lock_t* __membar_lock; /* init in atomic_ops.c */
#define _membar_lock    lock_get(__membar_lock)
#define _membar_unlock  lock_release(__membar_lock)
/* memory barriers
 * not a known cpu -> fall back to unlock/lock: safe but costly (it should
 * include a memory barrier effect)
 * lock/unlock does not imply a full memory barrier effect (it allows mixing
 * operations from before the lock with operations after the lock _inside_
 * the lock & unlock block; however in most implementations it is equivalent
 * to at least membar StoreStore | StoreLoad | LoadStore => only LoadLoad
 * is missing). On the other hand an unlock/lock will always be equivalent
 * to a full memory barrier
 * => to be safe we must use either unlock; lock or lock; unlock; lock; unlock
 * --andrei */
#define membar() \
	do{\
		_membar_unlock; \
		_membar_lock; \
	} while(0)

#endif /* NOSMP */
#define membar_write() membar()
#define membar_read()  membar()

#ifndef __CPU_alpha
#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
#else
/* really slow */
#define membar_depends()  membar_read()
#endif

#define membar_enter_lock() do {} while(0)
#define membar_leave_lock() do {} while(0)
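
/* Usage sketch (illustrative, not part of the original header; the variable
 * names data and data_ready are hypothetical): the classic publish/consume
 * pattern these barriers are meant for.
 *
 *   // writer: publish the data, then set the flag
 *   data = 42;
 *   membar_write();     // order the data store before the flag store
 *   data_ready = 1;
 *
 *   // reader: check the flag, then consume the data
 *   if (data_ready){
 *       membar_read();  // order the data load after the flag load
 *       use(data);
 *   }
 */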
/* membars after or before atomic_ops or atomic_setget -> use these or
 * mb_<atomic_op_name>() if you need a memory barrier in one of these
 * situations (on some archs where the atomic operations imply memory
 * barriers it is better to use atomic_op_x(); membar_atomic_op() rather
 * than atomic_op_x(); membar()) */
#define membar_atomic_op()           membar()
#define membar_atomic_setget()       membar()
#define membar_write_atomic_op()     membar_write()
#define membar_write_atomic_setget() membar_write()
#define membar_read_atomic_op()      membar_read()
#define membar_read_atomic_setget()  membar_read()
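
/* Usage sketch (illustrative): pairing an atomic op with the atomic-specific
 * barrier instead of a full membar(). In this fallback implementation both
 * expand to membar(), but on archs whose atomic ops already imply barriers
 * membar_atomic_op() can be cheaper or a no-op (counter is hypothetical):
 *
 *   atomic_inc_int(&counter);
 *   membar_atomic_op();   // order later accesses after the atomic op
 */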
#endif /* HAVE_ASM_INLINE_MEMBAR */


#ifndef HAVE_ASM_INLINE_ATOMIC_OPS

#ifdef GEN_LOCK_SET_T_UNLIMITED
#ifndef ATOMIC_OPS_USE_LOCK_SET
#define ATOMIC_OPS_USE_LOCK_SET
#endif
#else
#ifndef ATOMIC_OPS_USE_LOCK
#define ATOMIC_OPS_USE_LOCK
#endif
#endif /* GEN_LOCK_SET_T_UNLIMITED */
#ifdef ATOMIC_OPS_USE_LOCK_SET

#define _ATOMIC_LS_SIZE 256
/* hash on the variable address: ignore the first 4 bits since
 * vars are generally allocated at multiples of at least 16 bytes */
#define _atomic_ls_hash(v)  ((((unsigned long)(v))>>4)&(_ATOMIC_LS_SIZE-1))

extern gen_lock_set_t* _atomic_lock_set;

#define atomic_lock(v)    lock_set_get(_atomic_lock_set, _atomic_ls_hash(v))
#define atomic_unlock(v)  lock_set_release(_atomic_lock_set, _atomic_ls_hash(v))

#else

extern gen_lock_t* _atomic_lock; /* declared and init in ../atomic_ops.c */

#define atomic_lock(v)    lock_get(_atomic_lock)
#define atomic_unlock(v)  lock_release(_atomic_lock)

#endif /* ATOMIC_OPS_USE_LOCK_SET */
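
/* Illustrative note (not part of the original header): with the lock set,
 * each atomic variable is mapped onto one of _ATOMIC_LS_SIZE (256) locks by
 * hashing its address, e.g. (addresses are hypothetical):
 *
 *   _atomic_ls_hash((void*)0x1000) == (0x1000 >> 4) & 0xff == 0x00
 *   _atomic_ls_hash((void*)0x1010) == (0x1010 >> 4) & 0xff == 0x01
 *
 * so variables at different addresses usually contend on different locks,
 * while the single-lock fallback serializes all atomic ops on one lock.
 */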
/* atomic ops */

/* OP can include var (function param), no other var. is declared */
#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
	{ \
		atomic_lock(var); \
		OP ; \
		atomic_unlock(var); \
		return RET_EXPR; \
	}
/* like above, but takes an extra param: v =>
 * OP can use var and v (function params) */
#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
														P_TYPE v) \
	{ \
		atomic_lock(var); \
		OP ; \
		atomic_unlock(var); \
		return RET_EXPR; \
	}
/* OP can include var (function param), and ret (return)
 * (like ATOMIC_FUNC_DECL, but includes ret) */
#define ATOMIC_FUNC_DECL_RET(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
	{ \
		P_TYPE ret; \
		atomic_lock(var); \
		OP ; \
		atomic_unlock(var); \
		return RET_EXPR; \
	}
/* like ATOMIC_FUNC_DECL1, but declares an extra variable: P_TYPE ret */
#define ATOMIC_FUNC_DECL1_RET(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
														P_TYPE v) \
	{ \
		P_TYPE ret; \
		atomic_lock(var); \
		OP ; \
		atomic_unlock(var); \
		return RET_EXPR; \
	}
/* like ATOMIC_FUNC_DECL1_RET, but takes an extra param */
#define ATOMIC_FUNC_DECL2_RET(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
														P_TYPE v1, P_TYPE v2)\
	{ \
		P_TYPE ret; \
		atomic_lock(var); \
		OP ; \
		atomic_unlock(var); \
		return RET_EXPR; \
	}
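
/* Illustrative note (not part of the original header): each declaration
 * below generates a small lock-protected function. For example
 * ATOMIC_FUNC_DECL(inc, (*var)++, int, void, ... ) expands to roughly:
 *
 *   inline static void atomic_inc_int(volatile int *var)
 *   {
 *       atomic_lock(var);
 *       (*var)++;
 *       atomic_unlock(var);
 *       return;
 *   }
 */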
ATOMIC_FUNC_DECL(inc,      (*var)++, int, void, /* no return */ )
ATOMIC_FUNC_DECL(dec,      (*var)--, int, void, /* no return */ )
ATOMIC_FUNC_DECL1(and,     *var&=v, int, void, /* no return */ )
ATOMIC_FUNC_DECL1(or,      *var|=v, int, void, /* no return */ )
ATOMIC_FUNC_DECL_RET(inc_and_test, ret=++(*var), int, int, (ret==0) )
ATOMIC_FUNC_DECL_RET(dec_and_test, ret=--(*var), int, int, (ret==0) )
ATOMIC_FUNC_DECL1_RET(get_and_set, ret=*var; *var=v, int, int, ret)
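/* Note on the cmpxchg OP below (comment added for clarity): it is a
 * branchless select. (ret!=v1)-1 is all-ones when *var equalled the expected
 * value v1 (0-1 == ~0) and 0 otherwise, so
 *   *var = (mask & v2) + (~mask & ret)
 * stores the new value v2 on a match and writes the old value ret back
 * otherwise; the old value is always returned. */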
ATOMIC_FUNC_DECL2_RET(cmpxchg, ret=*var;\
						*var=(((ret!=v1)-1)&v2)+(~((ret!=v1)-1)&ret),\
						int, int, ret)
ATOMIC_FUNC_DECL1_RET(add,     *var+=v; ret=*var, int, int, ret )
ATOMIC_FUNC_DECL(inc,      (*var)++, long, void, /* no return */ )
ATOMIC_FUNC_DECL(dec,      (*var)--, long, void, /* no return */ )
ATOMIC_FUNC_DECL1(and,     *var&=v, long, void, /* no return */ )
ATOMIC_FUNC_DECL1(or,      *var|=v, long, void, /* no return */ )
ATOMIC_FUNC_DECL_RET(inc_and_test, ret=++(*var), long, long, (ret==0) )
ATOMIC_FUNC_DECL_RET(dec_and_test, ret=--(*var), long, long, (ret==0) )
ATOMIC_FUNC_DECL1_RET(get_and_set, ret=*var; *var=v, long, long, ret)
ATOMIC_FUNC_DECL2_RET(cmpxchg, ret=*var;\
						*var=(((ret!=v1)-1)&v2)+(~((ret!=v1)-1)&ret),\
						long, long, ret)
ATOMIC_FUNC_DECL1_RET(add,     *var+=v; ret=*var, long, long, ret )
#define atomic_inc(var)            atomic_inc_int(&(var)->val)
#define atomic_dec(var)            atomic_dec_int(&(var)->val)
#define atomic_and(var, mask)      atomic_and_int(&(var)->val, (mask))
#define atomic_or(var, mask)       atomic_or_int(&(var)->val, (mask))
#define atomic_dec_and_test(var)   atomic_dec_and_test_int(&(var)->val)
#define atomic_inc_and_test(var)   atomic_inc_and_test_int(&(var)->val)
#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
#define atomic_cmpxchg(var, old, new_v) \
	atomic_cmpxchg_int(&(var)->val, old, new_v)
#define atomic_add(var, v)         atomic_add_int(&(var)->val, v)
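
/* Usage sketch (illustrative, not part of the original header): the macros
 * above operate on the atomic_t counter type declared elsewhere (see
 * atomic_ops.h); they only rely on its int val member. atomic_set() and
 * destroy_object() are assumed here for illustration:
 *
 *   atomic_t refcnt;
 *   atomic_set(&refcnt, 1);
 *   atomic_inc(&refcnt);              // take an extra reference
 *   if (atomic_dec_and_test(&refcnt)) // drop a reference, test for zero
 *       destroy_object();             // runs only for the last reference
 */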
/* memory barrier versions: the same as the "normal" versions (since the
 * locks act as membars), except for the set/get
 */

/* mb_atomic_{set,get} use membar(): if we're lucky we have membars
 * for the arch. (e.g. sparc32) => membar() might be cheaper than lock/unlock */

#define mb_atomic_set_int(v, i) \
	do{ \
		membar(); \
		atomic_set_int(v, i); \
	}while(0)
inline static int mb_atomic_get_int(volatile int* v)
{
	membar();
	return atomic_get_int(v);
}


#define mb_atomic_set_long(v, i) \
	do{ \
		membar(); \
		atomic_set_long(v, i); \
	}while(0)


inline static long mb_atomic_get_long(volatile long* v)
{
	membar();
	return atomic_get_long(v);
}
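
/* Usage sketch (illustrative): mb_atomic_set/mb_atomic_get issue the barrier
 * for you, so publishing a hypothetical shared flag becomes (flag is a
 * volatile int, handle_event() is hypothetical):
 *
 *   mb_atomic_set_int(&flag, 1);    // membar(), then store
 *   ...
 *   if (mb_atomic_get_int(&flag))   // membar(), then load
 *       handle_event();
 */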
/* the rest are the same as the non-membar versions (the locks have a membar
 * effect) */
#define mb_atomic_inc_int(v)            atomic_inc_int(v)
#define mb_atomic_dec_int(v)            atomic_dec_int(v)
#define mb_atomic_or_int(v, m)          atomic_or_int(v, m)
#define mb_atomic_and_int(v, m)         atomic_and_int(v, m)
#define mb_atomic_inc_and_test_int(v)   atomic_inc_and_test_int(v)
#define mb_atomic_dec_and_test_int(v)   atomic_dec_and_test_int(v)
#define mb_atomic_get_and_set_int(v, i) atomic_get_and_set_int(v, i)
#define mb_atomic_cmpxchg_int(v, o, n)  atomic_cmpxchg_int(v, o, n)
#define mb_atomic_add_int(v, i)         atomic_add_int(v, i)

#define mb_atomic_inc_long(v)            atomic_inc_long(v)
#define mb_atomic_dec_long(v)            atomic_dec_long(v)
#define mb_atomic_or_long(v, m)          atomic_or_long(v, m)
#define mb_atomic_and_long(v, m)         atomic_and_long(v, m)
#define mb_atomic_inc_and_test_long(v)   atomic_inc_and_test_long(v)
#define mb_atomic_dec_and_test_long(v)   atomic_dec_and_test_long(v)
#define mb_atomic_get_and_set_long(v, i) atomic_get_and_set_long(v, i)
#define mb_atomic_cmpxchg_long(v, o, n)  atomic_cmpxchg_long(v, o, n)
#define mb_atomic_add_long(v, i)         atomic_add_long(v, i)
#define mb_atomic_inc(var)            mb_atomic_inc_int(&(var)->val)
#define mb_atomic_dec(var)            mb_atomic_dec_int(&(var)->val)
#define mb_atomic_and(var, mask)      mb_atomic_and_int(&(var)->val, (mask))
#define mb_atomic_or(var, mask)       mb_atomic_or_int(&(var)->val, (mask))
#define mb_atomic_dec_and_test(var)   mb_atomic_dec_and_test_int(&(var)->val)
#define mb_atomic_inc_and_test(var)   mb_atomic_inc_and_test_int(&(var)->val)
#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
#define mb_atomic_cmpxchg(v, o, n)    atomic_cmpxchg_int(&(v)->val, o, n)
#define mb_atomic_add(v, i)           atomic_add_int(&(v)->val, i)
#define mb_atomic_get(var)            mb_atomic_get_int(&(var)->val)
#define mb_atomic_set(var, i)         mb_atomic_set_int(&(var)->val, i)

#endif /* ifndef HAVE_ASM_INLINE_ATOMIC_OPS */

#endif /* _atomic_unknown_h */