  1. /*
  2. * $Id$
  3. *
  4. * Copyright (C) 2006 iptelorg GmbH
  5. *
  6. * Permission to use, copy, modify, and distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. /*!
  19. * \file
  20. * \brief SIP-router core :: Atomic operations and memory barriers
  21. * \ingroup core
  22. * Module: \ref core
  23. * See \ref atomic
  24. */
  25. /*
  26. * \page atomicops Atomic operations and memory barriers
  27. *
  28. * WARNING: atomic ops do not include memory barriers
  29. *
  30. * memory barriers:
  31. * ----------------
  32. *
  33. * void membar(); - memory barrier (load & store)
  34. * void membar_read() - load (read) memory barrier
  35. * void membar_write() - store (write) memory barrier
  36. * void membar_depends() - read depends memory barrier, needed before using
  37. * the contents of a pointer (for now is needed only
  38. * on Alpha so on all other CPUs it will be a no-op)
  39. * For more info see:
  40. * http://lse.sourceforge.net/locking/wmbdd.html
  41. * http://www.linuxjournal.com/article/8212
  42. *
  43. * void membar_enter_lock() - memory barrier function that should be
  44. * called after a lock operation (where lock is
  45. * an asm inline function that uses atomic store
  46. * operation on the lock var.). It is at most
  47. * a StoreStore|StoreLoad barrier, but could also
  48. * be empty if an atomic op implies a memory
  49. * barrier on the specific architecture.
  50. * Example usage:
  51. * raw_lock(l); membar_enter_lock(); ...
  52. * void membar_leave_lock() - memory barrier function that should be called
  53. * before an unlock operation (where unlock is an
  54. * asm inline function that uses at least an atomic
  55. * store on the lock var.). It is at most a
  56. * LoadStore|StoreStore barrier (but could also be
  57. * empty, see above).
  58. * Example: raw_lock(l); membar_enter_lock(); ..
  59. * ... critical section ...
  60. * membar_leave_lock(); raw_unlock(l);
  61. * void membar_atomic_op() - memory barrier that should be called if a memory
  62. * barrier is needed immediately after or
  63. * immediately before an atomic operation
  64. * (for example: atomic_inc(&i); membar_atomic_op()
  65. * instead of atomic_inc(&i); membar()).
  66. * atomic_op means every atomic operation except get
  67. * and set (for them use membar_atomic_setget()).
  68. * Using membar_atomic_op() instead of membar() in
  69. * these cases will generate faster code on some
  70. * architectures (for now x86 and x86_64), where
  71. * atomic operations act also as memory barriers.
  72. * Note that mb_atomic_<OP>(...) is equivalent to
  73. * membar_atomic_op(); atomic_<OP>(...) and in this
  74. * case the first form is preferred).
  75. * void membar_atomic_setget() - same as above but for atomic_set and
  76. * atomic_get (and not for any other atomic op.,
  77. * including atomic_get_and_set, for them use
  78. * membar_atomic_op()).
  79. * Note that mb_atomic_{get,set}(&i) is equivalent
  80. * and preferred to membar_atomic_setget();
  81. * atomic_{get,set}(&i) (it will generate faster
  82. * code on x86 and x86_64).
  83. * void membar_read_atomic_op() - like membar_atomic_op(), but acts only as
  84. * a read barrier.
  85. * void membar_read_atomic_setget() - like membar_atomic_setget() but acts only
  86. * as a read barrier.
  87. * void membar_write_atomic_op() - like membar_atomic_op(), but acts only as
  88. * a write barrier.
  89. * void membar_write_atomic_setget() - like membar_atomic_setget() but acts
  90. * only as a write barrier.
  91. *
  92. *
  93. * Note: - properly using memory barriers is tricky, in general try not to
  94. * depend on them. Locks include memory barriers, so you don't need
  95. * them for writes/load already protected by locks.
  96. * - membar_enter_lock() and membar_leave_lock() are needed only if
  97. * you implement your own locks using atomic ops (ser locks have the
  98. * membars included)
  99. *
  100. * atomic operations:
  101. * ------------------
  102. * type: atomic_t
  103. *
  104. * not including memory barriers:
  105. *
  106. * void atomic_set(atomic_t* v, int i) - v->val=i
  107. * int atomic_get(atomic_t* v) - return v->val
  108. * int atomic_get_and_set(atomic_t *v, i) - return old v->val, v->val=i
  109. * void atomic_inc(atomic_t* v)
  110. * void atomic_dec(atomic_t* v)
  111. * int atomic_inc_and_test(atomic_t* v) - returns 1 if the result is 0
  112. * int atomic_dec_and_test(atomic_t* v) - returns 1 if the result is 0
  113. * void atomic_or (atomic_t* v, int mask) - v->val|=mask
  114. * void atomic_and(atomic_t* v, int mask) - v->val&=mask
  115. * int atomic_add(atomic_t* v, int i) - v->val+=i; return v->val
  116. * int atomic_cmpxchg(atomic_t* v, o, n) - r=v->val; if (r==o) v->val=n;
  117. * return r (old value)
  118. *
  119. *
  120. * same ops, but with builtin memory barriers:
  121. *
  122. * void mb_atomic_set(atomic_t* v, int i) - v->val=i
  123. * int mb_atomic_get(atomic_t* v) - return v->val
  124. * int mb_atomic_get_and_set(atomic_t *v, i) - return old v->val, v->val=i
  125. * void mb_atomic_inc(atomic_t* v)
  126. * void mb_atomic_dec(atomic_t* v)
  127. * int mb_atomic_inc_and_test(atomic_t* v) - returns 1 if the result is 0
  128. * int mb_atomic_dec_and_test(atomic_t* v) - returns 1 if the result is 0
  129. * void mb_atomic_or(atomic_t* v, int mask) - v->val|=mask
  130. * void mb_atomic_and(atomic_t* v, int mask)- v->val&=mask
  131. * int mb_atomic_add(atomic_t* v, int i) - v->val+=i; return v->val
  132. * int mb_atomic_cmpxchg(atomic_t* v, o, n) - r=v->val; if (r==o) v->val=n;
  133. * return r (old value)
  134. *
  135. * Same operations are available for int and long. The functions are named
  136. * after the following rules:
  137. * - add an int or long suffix to the correspondent atomic function
  138. * - volatile int* or volatile long* replace atomic_t* in the functions
  139. * declarations
  140. * - long and int replace the parameter type (if the function has an extra
  141. * parameter) and the return value
  142. * E.g.:
  143. * long atomic_get_long(volatile long* v)
  144. * int atomic_get_int( volatile int* v)
  145. * long atomic_get_and_set_long(volatile long* v, long l)
  146. * int atomic_get_and_set_int(volatile int* v, int i)
  147. *
  148. * Config defines: CC_GCC_LIKE_ASM - the compiler supports gcc style
  149. * inline asm
  150. * NOSMP - the code will be a little faster, but not SMP
  151. * safe
  152. * __CPU_i386, __CPU_x86_64, X86_OOSTORE - see
  153. * atomic/atomic_x86.h
  154. * __CPU_mips, __CPU_mips2, __CPU_mips64, MIPS_HAS_LLSC - see
  155. * atomic/atomic_mips2.h
  156. * __CPU_ppc, __CPU_ppc64 - see atomic/atomic_ppc.h
  157. * __CPU_sparc - see atomic/atomic_sparc.h
  158. * __CPU_sparc64, SPARC64_MODE - see atomic/atomic_sparc64.h
  159. * __CPU_arm, __CPU_arm6 - see atomic/atomic_arm.h
  160. * __CPU_alpha - see atomic/atomic_alpha.h
  161. */
  162. /*
  163. * History:
  164. * --------
  165. * 2006-03-08 created by andrei
  166. * 2007-05-13 moved some of the decl. and includes into atomic_common.h and
  167. * atomic_native.h (andrei)
  168. */
  169. #ifndef __atomic_ops
  170. #define __atomic_ops
  171. #include "atomic/atomic_common.h"
  172. #include "atomic/atomic_native.h"
  173. /*! \brief if no native operations, emulate them using locks */
  174. #if ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR
  175. #include "atomic/atomic_unknown.h"
  176. #endif /* if HAVE_ASM_INLINE_ATOMIC_OPS */
  177. #endif