Explorar o código

- added membar_depends(), needed on SMP archs with separate cache banks,
where it's possible to get a new pointer value but see stale pointer content
(e.g. if the two are in different cache banks and the "content" bank is very
busy processing a long invalidations queue). For now only Alpha SMP
needs it; on all other archs it is a no-op (for more info see atomic_ops.h,
http://lse.sourceforge.net/locking/wmbdd.html,
http://www.linuxjournal.com/article/8212 or the Alpha Architecture Reference
Manual, Chapter 5.6).

- added membar_atomic_op(), membar_atomic_setget(), membar_read_atomic_op(),
membar_read_atomic_setget(), membar_write_atomic_op(),
membar_write_atomic_setget() -- special case memory barriers that can be
optimized if the atomic ops already force some kind of barrier (e.g. x86),
see the description in atomic_ops.h for more info.

Andrei Pelinescu-Onciul hai 18 anos
pai
achega
ebc5ec7bd8

+ 28 - 2
atomic/atomic_alpha.h

@@ -28,6 +28,8 @@
  * --------
  *  2006-03-31  created by andrei
  *  2007-05-10  added atomic_add & atomic_cmpxchg (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 
@@ -44,19 +46,43 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 
 #else
 
 #define membar()		asm volatile ("    mb \n\t" : : : "memory" ) 
 #define membar_read()	membar()
 #define membar_write()	asm volatile ("    wmb \n\t" : : : "memory" )
+#define membar_depends()	asm volatile ("mb \n\t" : : : "memory" )
 #define membar_enter_lock() asm volatile("mb \n\t" : : : "memory")
 #define membar_leave_lock() asm volatile("mb \n\t" : : : "memory")
 
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 #endif /* NOSMP */
 
 

+ 16 - 2
atomic/atomic_arm.h

@@ -29,6 +29,8 @@
  * --------
  *  2006-03-31  created by andrei
  *  2007-05-10  added atomic_add and atomic_cmpxchg (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 
@@ -44,10 +46,22 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()   do {} while(0) /* really empty, not even a cc bar.*/
 /* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 #else /* SMP */
 #warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
 /* fall back to default lock based barriers (don't define HAVE_ASM...) */

+ 28 - 2
atomic/atomic_mips2.h

@@ -34,6 +34,8 @@
  * --------
  *  2006-03-08  created by andrei
  *  2007-05-10  added atomic_add & atomic_cmpxchg (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 
@@ -52,10 +54,22 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 
 #else
 
@@ -71,8 +85,20 @@
 
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 #define membar_enter_lock() membar()
 #define membar_leave_lock() membar()
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 
 #endif /* NOSMP */
 

+ 28 - 2
atomic/atomic_ppc.h

@@ -34,6 +34,8 @@
  *               r0 as the second operand in addi and  addi rD,r0, val
  *               is a special case, equivalent with rD=0+val and not
  *               rD=r0+val (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 #ifndef _atomic_ppc_h
@@ -52,10 +54,22 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 
 #else
 #define membar() asm volatile ("sync \n\t" : : : "memory") 
@@ -63,10 +77,22 @@
 #define membar_read() asm volatile ("lwsync \n\t" : : : "memory") 
 /* on "normal" cached mem. eieio orders StoreStore */
 #define membar_write() asm volatile ("eieio \n\t" : : : "memory") 
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 #define membar_enter_lock() asm volatile("lwsync \n\t" : : : "memory")
 /* for unlock lwsync will work too and is faster then sync
  *  [IBM Prgramming Environments Manual, D.4.2.2] */
 #define membar_leave_lock() asm volatile("lwsync \n\t" : : : "memory")
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 #endif /* NOSMP */
 
 

+ 29 - 3
atomic/atomic_sparc.h

@@ -25,6 +25,8 @@
  * History:
  * --------
  *  2006-03-28  created by andrei
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 
@@ -40,16 +42,40 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barrriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 #else /* SMP */
 #define membar_write() asm volatile ("stbar \n\t" : : : "memory") 
 #define membar() membar_write()
 #define membar_read() asm volatile ("" : : : "memory") 
-#define membar_enter_lock() 
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
+#define membar_enter_lock() do {} while(0)
 #define membar_leave_lock() asm volatile ("stbar \n\t" : : : "memory") 
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 
 #endif /* NOSMP */
 

+ 28 - 2
atomic/atomic_sparc64.h

@@ -30,6 +30,8 @@
  * --------
  *  2006-03-28  created by andrei
  *  2007-05-08 added atomic_add and atomic_cmpxchg (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 
@@ -52,6 +54,7 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /*  memory barriers for lock & unlock where lock & unlock are inline asm
  *  functions that use atomic ops (and both of them use at least a store to
  *  the lock). membar_enter_lock() is at most a StoreStore|StoreLoad barrier
@@ -64,8 +67,19 @@
  *
  *  Usage: lock(); membar_enter_lock(); .... ; membar_leave_lock(); unlock()
  */
-#define membar_enter_lock()
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 #else /* SMP */
 #define membar() \
 	asm volatile ( \
@@ -74,10 +88,22 @@
 
 #define membar_read() asm volatile ("membar #LoadLoad \n\t" : : : "memory")
 #define membar_write() asm volatile ("membar #StoreStore \n\t" : : : "memory")
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 #define membar_enter_lock() \
 	asm volatile ("membar #StoreStore | #StoreLoad \n\t" : : : "memory");
 #define membar_leave_lock() \
 	asm volatile ("membar #LoadStore | #StoreStore \n\t" : : : "memory");
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 #endif /* NOSMP */
 
 

+ 26 - 1
atomic/atomic_unknown.h

@@ -35,6 +35,8 @@
  *  2006-03-08  created by andrei
  *  2007-05-11  added atomic_add and atomic_cmpxchg 
  *              use lock_set if lock economy is not needed (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 #ifndef _atomic_unknown_h
@@ -47,7 +49,7 @@
 #ifndef HAVE_ASM_INLINE_MEMBAR
 
 #ifdef NOSMP
-#define membar()
+#define membar() do {} while(0)
 #else /* SMP */
 
 #warning no native memory barrier implementations, falling back to slow lock \
@@ -82,6 +84,29 @@ extern gen_lock_t* __membar_lock; /* init in atomic_ops.c */
 
 #define membar_read()  membar()
 
+
+#ifndef __CPU_alpha
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
+#else
+/* really slow */
+#define membar_depends()  membar_read()
+#endif
+
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
+
 #endif /* HAVE_ASM_INLINE_MEMBAR */
 
 

+ 30 - 8
atomic/atomic_x86.h

@@ -35,6 +35,8 @@
  *  2006-03-08  created by andrei
  *  2007-05-07  added cmpxchg (andrei)
  *  2007-05-08  added atomic_add (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 #ifndef _atomic_x86_h
@@ -57,10 +59,22 @@
 #define membar()	asm volatile ("" : : : "memory")
 #define membar_read()	membar()
 #define membar_write()	membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barrriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				do {} while(0)
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		do {} while(0)
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			do {} while(0)
+#define membar_read_atomic_setget()		membar_read()
 
 #else
 
@@ -99,15 +113,23 @@
 
 #endif /* __CPU_x86_64 */
 
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barrriers: empty, not needed on x86 or x86_64 (atomic ops already
  *  force the barriers if needed); the lock/unlock should already contain the 
  *  gcc do_not_cache barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
-
-
-
-
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				do {} while(0)
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		do {} while(0)
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			do {} while(0)
+#define membar_read_atomic_setget()		membar_read()
 
 
 #endif /* NOSMP */

+ 47 - 3
atomic_ops.h

@@ -22,9 +22,15 @@
  *  memory barriers:
  *  ----------------
  *
- *  void membar();       - memory barrier (load & store)
- *  void membar_read()   - load (read) memory barrier
- *  void membar_write()  - store (write) memory barrier
+ *  void membar();        - memory barrier (load & store)
+ *  void membar_read()    - load (read) memory barrier
+ *  void membar_write()   - store (write) memory barrier
+ *  void membar_depends() - read depends memory barrier, needed before using
+ *                          the contents of a pointer (for now is needed only
+ *                          on Alpha so on all other CPUs it will be a no-op)
+ *                          For more info see: 
+ *                          http://lse.sourceforge.net/locking/wmbdd.html
+ *                          http://www.linuxjournal.com/article/8212
  *
  *  void membar_enter_lock() - memory barrier function that should be 
  *                             called after a lock operation (where lock is
@@ -44,6 +50,37 @@
  *                             Example: raw_lock(l); membar_enter_lock(); ..
  *                                      ... critical section ...
  *                                      membar_leave_lock(); raw_unlock(l);
+ *  void membar_atomic_op() - memory barrier that should be called if a memory
+ *                            barrier is needed immediately after or 
+ *                            immediately before an atomic operation
+ *                            (for example: atomic_inc(&i); membar_atomic_op()
+ *                               instead of atomic_inc(&i); membar()).
+ *                            atomic_op means every atomic operation except get
+ *                            and set (for them use membar_atomic_setget()).
+ *                            Using membar_atomic_op() instead of membar() in
+ *                            these cases will generate faster code on some
+ *                            architectures (for now x86 and x86_64), where 
+ *                            atomic operations act also as memory barriers.
+ *                            Note that mb_atomic_<OP>(...) is equivalent to
+ *                            membar_atomic_op(); atomic_<OP>(...) and in this
+ *                            case the first form is preferred).
+ * void membar_atomic_setget() - same as above but for atomic_set and 
+ *                            atomic_get (and not for any other atomic op.,
+ *                            including atomic_get_and_set, for them use
+ *                            membar_atomic_op()).
+ *                            Note that mb_atomic_{get,set}(&i) is equivalent 
+ *                            and preferred to membar_atomic_setget(); 
+ *                            atomic_{get,set}(&i) (it will generate faster
+ *                            code on x86 and x86_64).
+ * void membar_read_atomic_op() - like membar_atomic_op(), but acts only as
+ *                             a read barrier.
+ * void membar_read_atomic_setget() - like membar_atomic_setget() but acts only
+ *                            as a read barrier.
+ * void membar_write_atomic_op() - like membar_atomic_op(), but acts only as
+ *                            a write barrier.
+ * void membar_write_atomic_setget() - like membar_atomic_setget() but acts 
+ *                            only as a write barrier.
+ *
  *
  *  Note: - properly using memory barriers is tricky, in general try not to 
  *        depend on them. Locks include memory barriers, so you don't need
@@ -67,6 +104,10 @@
  *  int atomic_dec_and_test(atomic_t* v)     - returns 1 if the result is 0
  *  void atomic_or (atomic_t* v, int mask)   - v->val|=mask 
  *  void atomic_and(atomic_t* v, int mask)   - v->val&=mask
+ *  int atomic_add(atomic_t* v, int i)       - v->val+=i; return v->val
+ *  int atomic_cmpxchg(atomic_t* v, o, n)    - r=v->val; if (r==o) v->val=n;
+ *                                             return r (old value)
+ *
  * 
  * same ops, but with builtin memory barriers:
  *
@@ -79,6 +120,9 @@
  *  int mb_atomic_dec_and_test(atomic_t* v)  - returns 1 if the result is 0
  *  void mb_atomic_or(atomic_t* v, int mask - v->val|=mask 
  *  void mb_atomic_and(atomic_t* v, int mask)- v->val&=mask
+ *  int mb_atomic_add(atomic_t* v, int i)    - v->val+=i; return v->val
+ *  int mb_atomic_cmpxchg(atomic_t* v, o, n) - r=v->val; if (r==o) v->val=n;
+ *                                             return r (old value)
  *
  *  Same operations are available for int and long. The functions are named
  *   after the following rules:

+ 18 - 0
test/atomic_test2.c

@@ -209,6 +209,24 @@ int main(int argc, char** argv)
 	printf(" membar_write() ........................ ok\n");
 	membar_read();
 	printf(" membar_read() ......................... ok\n");
+	membar_depends();
+	printf(" membar_depends() ...................... ok\n");
+	membar_enter_lock();
+	printf(" membar_enter_lock() ................... ok\n");
+	membar_leave_lock();
+	printf(" membar_leave_lock() ................... ok\n");
+	membar_atomic_op();
+	printf(" membar_atomic_op() .................... ok\n");
+	membar_atomic_setget();
+	printf(" membar_atomic_setget() ................ ok\n");
+	membar_read_atomic_op();
+	printf(" membar_read_atomic_op() ............... ok\n");
+	membar_read_atomic_setget();
+	printf(" membar_read_atomic_setget() ........... ok\n");
+	membar_write_atomic_op();
+	printf(" membar_write_atomic_op() .............. ok\n");
+	membar_write_atomic_setget();
+	printf(" membar_write_atomic_setget() .......... ok\n");
 	
 	printf("\nstarting atomic ops basic tests...\n");