
- parts of atomic_ops.h moved into atomic/atomic_common.h and
atomic/atomic_native.h

- added membar_enter_lock() and membar_leave_lock() (to be used only when
creating locks with the atomic ops functions; for more info see atomic_ops.h
and the usage sketch below)
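
For illustration, a minimal sketch of a lock built on atomic ops that uses the new
barriers. This is not the project's own lock code; GCC's __sync_lock_test_and_set()
stands in here for the inline-asm test-and-set a real implementation would use:

    #include "atomic_ops.h"

    /* hypothetical spinlock, shown only to illustrate barrier placement */
    typedef struct { volatile int val; } my_lock_t;

    static inline void my_lock(my_lock_t *l)
    {
        /* spin until the previous value was 0, i.e. the lock was free */
        while (__sync_lock_test_and_set(&l->val, 1))
            ;
        membar_enter_lock(); /* critical section cannot move above the lock store */
    }

    static inline void my_unlock(my_lock_t *l)
    {
        membar_leave_lock(); /* critical-section stores complete before the release */
        l->val = 0;
    }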

Andrei Pelinescu-Onciul 18 years ago
parent
commit
118433b063
10 files changed, 228 insertions and 59 deletions
  1. atomic/atomic_alpha.h  +7 -0
  2. atomic/atomic_arm.h  +4 -0
  3. atomic/atomic_common.h  +54 -0
  4. atomic/atomic_mips2.h  +7 -0
  5. atomic/atomic_native.h  +83 -0
  6. atomic/atomic_ppc.h  +9 -0
  7. atomic/atomic_sparc.h  +7 -0
  8. atomic/atomic_sparc64.h  +18 -0
  9. atomic/atomic_x86.h  +11 -0
  10. atomic_ops.h  +28 -59

+ 7 - 0
atomic/atomic_alpha.h

@@ -44,11 +44,18 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
 #else
 
 #define membar()		asm volatile ("    mb \n\t" : : : "memory" ) 
 #define membar_read()	membar()
 #define membar_write()	asm volatile ("    wmb \n\t" : : : "memory" )
+#define membar_enter_lock() asm volatile("mb \n\t" : : : "memory")
+#define membar_leave_lock() asm volatile("mb \n\t" : : : "memory")
 
 #endif /* NOSMP */
 

+ 4 - 0
atomic/atomic_arm.h

@@ -44,6 +44,10 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
 #else /* SMP */
 #warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
 /* fall back to default lock based barriers (don't define HAVE_ASM...) */

+ 54 - 0
atomic/atomic_common.h

@@ -0,0 +1,54 @@
+/* 
+ * $Id$
+ * 
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * common part for all the atomic operations (atomic_t and common operations)
+ *  See atomic_ops.h for more info.
+ */
+/* 
+ * History:
+ * --------
+ *  2006-03-08  created by andrei
+ *  2007-05-13  split from atomic_ops.h (andrei)
+ */
+#ifndef __atomic_common
+#define __atomic_common
+
+/* atomic_t defined as a struct to easily catch non-atomic ops on it,
+ * e.g.  atomic_t  foo; foo++  will generate a compile error */
+typedef struct{ volatile int val; } atomic_t; 
+
+
+/* store and load operations are atomic on all cpus, note however that they
+ * don't include memory barriers so if you want to use atomic_{get,set} 
+ * to implement mutexes you must use the mb_* versions or explicitly use
+ * the barriers */
+
+#define atomic_set_int(pvar, i) (*(pvar)=i)
+#define atomic_set_long(pvar, i) (*(pvar)=i)
+#define atomic_get_int(pvar) (*(pvar))
+#define atomic_get_long(pvar) (*(pvar))
+
+#define atomic_set(at_var, value)	(atomic_set_int(&((at_var)->val), (value)))
+
+inline static int atomic_get(atomic_t *v)
+{
+	return atomic_get_int(&(v->val));
+}
+
+
+#endif
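
For illustration, a minimal usage sketch of the primitives above (the counter and
the two helper functions are hypothetical, not part of the header); note that
neither call implies a memory barrier:

    #include "atomic/atomic_common.h"

    static atomic_t counter;

    static void counter_reset(void)
    {
        atomic_set(&counter, 0);     /* atomic store, no memory barrier */
    }

    static int counter_value(void)
    {
        return atomic_get(&counter); /* atomic load, no memory barrier */
    }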

+ 7 - 0
atomic/atomic_mips2.h

@@ -52,6 +52,11 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
 #else
 
 #define membar() \
@@ -66,6 +71,8 @@
 
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_enter_lock() membar()
+#define membar_leave_lock() membar()
 
 #endif /* NOSMP */
 

+ 83 - 0
atomic/atomic_native.h

@@ -0,0 +1,83 @@
+/* 
+ * $Id$
+ * 
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ *  include file for native (asm) atomic operations and memory barriers
+ *  WARNING: atomic ops do not include memory barriers
+ *  See atomic_ops.h for more info.
+ *  Expects atomic_t to be defined (#include "atomic_common.h")
+ *
+ * Config defines:   CC_GCC_LIKE_ASM  - the compiler supports gcc style
+ *                     inline asm
+ *                   NOSMP - the code will be a little faster, but not SMP
+ *                            safe
+ *                   __CPU_i386, __CPU_x86_64, X86_OOSTORE - see 
+ *                       atomic_x86.h
+ *                   __CPU_mips, __CPU_mips2, __CPU_mips64, MIPS_HAS_LLSC - see
+ *                       atomic_mips2.h
+ *                   __CPU_ppc, __CPU_ppc64 - see atomic_ppc.h
+ *                   __CPU_sparc - see atomic_sparc.h
+ *                   __CPU_sparc64, SPARC64_MODE - see atomic_sparc64.h
+ *                   __CPU_arm, __CPU_arm6 - see atomic_arm.h
+ *                   __CPU_alpha - see atomic_alpha.h
+ */
+/* 
+ * History:
+ * --------
+ *  2006-03-08  created by andrei
+ *  2007-05-13  split from atomic_ops.h (andrei)
+ */
+#ifndef __atomic_native
+#define __atomic_native
+
+#ifdef CC_GCC_LIKE_ASM
+
+#if defined __CPU_i386 || defined __CPU_x86_64
+
+#include "atomic_x86.h"
+
+#elif defined __CPU_mips2 || defined __CPU_mips64 || \
+	  ( defined __CPU_mips && defined MIPS_HAS_LLSC )
+
+#include "atomic_mips2.h"
+
+#elif defined __CPU_ppc || defined __CPU_ppc64
+
+#include "atomic_ppc.h"
+
+#elif defined __CPU_sparc64
+
+#include "atomic_sparc64.h"
+
+#elif defined __CPU_sparc
+
+#include "atomic_sparc.h"
+
+#elif defined __CPU_arm || defined __CPU_arm6
+
+#include "atomic_arm.h"
+
+#elif defined __CPU_alpha
+
+#include "atomic_alpha.h"
+
+#endif /* __CPU_xxx  => no known cpu */
+
+#endif /* CC_GCC_LIKE_ASM */
+
+
+#endif
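
For illustration, the include order this header expects; atomic_ops.h (shown
further below) does exactly this:

    /* the build system is assumed to define CC_GCC_LIKE_ASM plus the matching
     * __CPU_* macro (e.g. __CPU_i386); with NOSMP the barriers reduce to
     * plain gcc memory barriers */
    #include "atomic/atomic_common.h"  /* atomic_t and the plain get/set ops */
    #include "atomic/atomic_native.h"  /* per-CPU asm implementation, if any */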

+ 9 - 0
atomic/atomic_ppc.h

@@ -48,12 +48,21 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
 #else
 #define membar() asm volatile ("sync \n\t" : : : "memory") 
 /* lwsync orders LoadLoad, LoadStore and StoreStore */
 #define membar_read() asm volatile ("lwsync \n\t" : : : "memory") 
 /* on "normal" cached mem. eieio orders StoreStore */
 #define membar_write() asm volatile ("eieio \n\t" : : : "memory") 
+#define membar_enter_lock() asm volatile("lwsync \n\t" : : : "memory")
+/* for unlock lwsync will work too and is faster than sync
+ *  [IBM Programming Environments Manual, D.4.2.2] */
+#define membar_leave_lock() asm volatile("lwsync \n\t" : : : "memory")
 #endif /* NOSMP */
 
 

+ 7 - 0
atomic/atomic_sparc.h

@@ -40,10 +40,17 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
 #else /* SMP */
 #define membar_write() asm volatile ("stbar \n\t" : : : "memory") 
 #define membar() membar_write()
 #define membar_read() asm volatile ("" : : : "memory") 
+#define membar_enter_lock() 
+#define membar_leave_lock() asm volatile ("stbar \n\t" : : : "memory") 
+
 #endif /* NOSMP */
 
 

+ 18 - 0
atomic/atomic_sparc64.h

@@ -52,6 +52,20 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/*  memory barriers for lock & unlock where lock & unlock are inline asm
+ *  functions that use atomic ops (and both of them use at least a store to
+ *  the lock). membar_enter_lock() is at most a StoreStore|StoreLoad barrier
+ *   and membar_leave_lock() is at most a LoadStore|StoreStore barrier
+ *  (if the atomic ops on the specific architecture imply these barriers
+ *   => these macros will be empty)
+ *   Warning: these barriers don't force LoadLoad ordering between code
+ *    before the lock/membar_enter_lock() and code 
+ *    after membar_leave_lock()/unlock()
+ *
+ *  Usage: lock(); membar_enter_lock(); .... ; membar_leave_lock(); unlock()
+ */
+#define membar_enter_lock()
+#define membar_leave_lock()
 #else /* SMP */
 #define membar() \
 	asm volatile ( \
@@ -60,6 +74,10 @@
 
 #define membar_read() asm volatile ("membar #LoadLoad \n\t" : : : "memory")
 #define membar_write() asm volatile ("membar #StoreStore \n\t" : : : "memory")
+#define membar_enter_lock() \
+	asm volatile ("membar #StoreStore | #StoreLoad \n\t" : : : "memory");
+#define membar_leave_lock() \
+	asm volatile ("membar #LoadStore | #StoreStore \n\t" : : : "memory");
 #endif /* NOSMP */
 
 

+ 11 - 0
atomic/atomic_x86.h

@@ -57,6 +57,10 @@
 #define membar()	asm volatile ("" : : : "memory")
 #define membar_read()	membar()
 #define membar_write()	membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
 
 #else
 
@@ -95,6 +99,13 @@
 
 #endif /* __CPU_x86_64 */
 
+/* lock barriers: empty, not needed on x86 or x86_64 (atomic ops already
+ *  force the barriers if needed); the lock/unlock should already contain the 
+ *  gcc do_not_cache barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
+
 
 
 

+ 28 - 59
atomic_ops.h

@@ -26,9 +26,31 @@
  *  void membar_read()   - load (read) memory barrier
  *  void membar_write()  - store (write) memory barrier
  *
- *  Note: properly using memory barriers is tricky, in general try not to 
+ *  void membar_enter_lock() - memory barrier function that should be 
+ *                             called after a lock operation (where lock is
+ *                             an asm inline function that uses atomic store
+ *                             operation on the lock var.). It is at most
+ *                             a StoreStore|StoreLoad barrier, but could also
+ *                             be empty if an atomic op implies a memory 
 *                             barrier on the specific architecture.
+ *                             Example usage: 
+ *                               raw_lock(l); membar_enter_lock(); ...
+ *  void membar_leave_lock() - memory barrier function that should be called 
+ *                             before an unlock operation (where unlock is an
+ *                             asm inline function that uses at least an atomic
 *                             store on the lock var.). It is at most a 
+ *                             LoadStore|StoreStore barrier (but could also be
+ *                             empty, see above).
+ *                             Example: raw_lock(l); membar_enter_lock(); ..
+ *                                      ... critical section ...
+ *                                      membar_leave_lock(); raw_unlock(l);
+ *
+ *  Note: - properly using memory barriers is tricky, in general try not to 
  *        depend on them. Locks include memory barriers, so you don't need
  *        them for writes/load already protected by locks.
+ *        - membar_enter_lock() and membar_leave_lock() are needed only if
+ *        you implement your own locks using atomic ops (ser locks have the
+ *        membars included)
  *
  * atomic operations:
  * ------------------
@@ -89,70 +111,17 @@
  * History:
  * --------
  *  2006-03-08  created by andrei
+ *  2007-05-13  moved some of the decl. and includes into atomic_common.h and 
+ *               atomic_native.h (andrei)
  */
 #ifndef __atomic_ops
 #define __atomic_ops
 
-/* atomic_t defined as a struct to easily catch non atomic ops. on it,
- * e.g.  atomic_t  foo; foo++  will generate a compile error */
-typedef struct{ volatile int val; } atomic_t; 
-
-
-/* store and load operations are atomic on all cpus, note however that they
- * don't include memory barriers so if you want to use atomic_{get,set} 
- * to implement mutexes you must use the mb_* versions or explicitely use
- * the barriers */
-
-#define atomic_set_int(pvar, i) (*(pvar)=i)
-#define atomic_set_long(pvar, i) (*(pvar)=i)
-#define atomic_get_int(pvar) (*(pvar))
-#define atomic_get_long(pvar) (*(pvar))
-
-#define atomic_set(at_var, value)	(atomic_set_int(&((at_var)->val), (value)))
-
-inline static int atomic_get(atomic_t *v)
-{
-	return atomic_get_int(&(v->val));
-}
-
-
-
-#ifdef CC_GCC_LIKE_ASM
-
-#if defined __CPU_i386 || defined __CPU_x86_64
-
-#include "atomic/atomic_x86.h"
-
-#elif defined __CPU_mips2 || defined __CPU_mips64 || \
-	  ( defined __CPU_mips && defined MIPS_HAS_LLSC )
-
-#include "atomic/atomic_mips2.h"
-
-#elif defined __CPU_ppc || defined __CPU_ppc64
-
-#include "atomic/atomic_ppc.h"
-
-#elif defined __CPU_sparc64
-
-#include "atomic/atomic_sparc64.h"
-
-#elif defined __CPU_sparc
-
-#include "atomic/atomic_sparc.h"
-
-#elif defined __CPU_arm || defined __CPU_arm6
-
-#include "atomic/atomic_arm.h"
-
-#elif defined __CPU_alpha
-
-#include "atomic/atomic_alpha.h"
-
-#endif /* __CPU_xxx  => no known cpu */
-
-#endif /* CC_GCC_LIKE_ASM */
+#include "atomic/atomic_common.h"
 
+#include "atomic/atomic_native.h"
 
+/* if no native operations, emulate them using locks */
 #if  ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR
 
 #include "atomic/atomic_unknown.h"
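
To close, the documented raw_lock()/raw_unlock() pattern as a small function,
purely for illustration; the lock type and the raw_lock()/raw_unlock() prototypes
are assumptions based on the comments above, not taken from the tree:

    #include "atomic_ops.h"

    /* hypothetical declarations: asm lock/unlock routines without barriers */
    typedef volatile int lock_t;
    void raw_lock(lock_t *l);
    void raw_unlock(lock_t *l);

    static volatile int shared_counter;

    static void update_shared(lock_t *l)
    {
        raw_lock(l);
        membar_enter_lock();  /* at most StoreStore|StoreLoad, may expand to nothing */
        shared_counter++;
        membar_leave_lock();  /* at most LoadStore|StoreStore, may expand to nothing */
        raw_unlock(l);
    }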