@@ -31,8 +31,15 @@
 #ifndef SPIN_LOCK_H
 #define SPIN_LOCK_H
 
+#include "core/os/thread.h"
 #include "core/typedefs.h"
 
+#ifdef THREADS_ENABLED
+
+// Note the implementations below avoid false sharing by ensuring their
+// sizes match the assumed cache line. We can't use align attributes
+// because these objects may end up unaligned in semi-tightly packed arrays.
+
 #ifdef _MSC_VER
 #include <intrin.h>
 #endif
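For illustration (not part of the patch): the union-based padding described by the comment above guarantees each lock occupies a full assumed cache line by size alone, so no two locks can ever fall within the same line, without the over-alignment that alignas would force. A minimal sketch, assuming a 64-byte line (the real code takes the value from Thread::CACHE_LINE_BYTES) and a hypothetical PaddedFlag type:

#include <atomic>

// Hypothetical example, not the patch's code: size-pad a flag to one
// assumed cache line so neighbors in a packed array don't share it.
union PaddedFlag {
	std::atomic<bool> flag{false}; // the actual state
	char aligner[64];              // assumption: 64-byte cache line
};

static_assert(sizeof(PaddedFlag) == 64, "exactly one assumed cache line");

Unlike alignas, this constrains only the size, not the starting address, which is why it remains valid even when objects land unaligned in the "semi-tightly packed arrays" the comment mentions.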
@@ -42,7 +49,10 @@
 #include <os/lock.h>
 
 class SpinLock {
-	mutable os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
+	union {
+		mutable os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
+		char aligner[Thread::CACHE_LINE_BYTES];
+	};
 
 public:
 	_ALWAYS_INLINE_ void lock() const {
@@ -54,9 +64,7 @@ public:
 	}
 };
 
-#else
-
-#include "core/os/thread.h"
+#else // __APPLE__
 
 #include <atomic>
 
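For context (the calls themselves sit outside the visible hunks): os_unfair_lock is Apple's low-level lock from <os/lock.h>, driven through a small C API. A minimal standalone usage sketch, with with_lock as a hypothetical caller:

#include <os/lock.h>

os_unfair_lock mutex = OS_UNFAIR_LOCK_INIT;

void with_lock() {
	os_unfair_lock_lock(&mutex);   // blocks until the lock is available
	// ... critical section ...
	os_unfair_lock_unlock(&mutex); // must be called by the locking thread
}

Moving _lock into the union changes only sizeof(SpinLock); how the lock is acquired and released is untouched.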
@@ -84,8 +92,11 @@ _ALWAYS_INLINE_ static void _cpu_pause() {
 
 static_assert(std::atomic_bool::is_always_lock_free);
 
-class alignas(Thread::CACHE_LINE_BYTES) SpinLock {
-	mutable std::atomic<bool> locked = ATOMIC_VAR_INIT(false);
+class SpinLock {
+	union {
+		mutable std::atomic<bool> locked = ATOMIC_VAR_INIT(false);
+		char aligner[Thread::CACHE_LINE_BYTES];
+	};
 
 public:
 	_ALWAYS_INLINE_ void lock() const {
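The body of lock() lies outside the hunks shown, but the _cpu_pause() helper named in the hunk header above points at the usual test-and-test-and-set shape: one atomic exchange to attempt acquisition, then read-only spinning with a pause until the lock looks free. A sketch of that pattern (lock_sketch and cpu_pause_stub are stand-ins, not the patch's code):

#include <atomic>

static inline void cpu_pause_stub() { /* e.g. _mm_pause() on x86; no-op here */ }

static void lock_sketch(std::atomic<bool> &locked) {
	// exchange() returns the previous value; false means we just acquired it.
	while (locked.exchange(true, std::memory_order_acquire)) {
		// Lost the race: spin on plain loads (no cache-line writes)
		// until the holder releases, easing off the core each iteration.
		while (locked.load(std::memory_order_relaxed)) {
			cpu_pause_stub();
		}
	}
}

The matching unlock would be a single store of false with std::memory_order_release.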
@@ -107,4 +118,14 @@ public:
 
 #endif // __APPLE__
 
+#else // THREADS_ENABLED
+
+class SpinLock {
+public:
+	void lock() const {}
+	void unlock() const {}
+};
+
+#endif // THREADS_ENABLED
+
 #endif // SPIN_LOCK_H
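With THREADS_ENABLED undefined, the stub above turns lock() and unlock() into no-ops, so call sites compile unchanged in single-threaded builds. For example (update_counter is a hypothetical caller):

SpinLock lock;
int counter = 0;

void update_counter() {
	lock.lock();   // real spin lock in threaded builds, no-op otherwise
	++counter;
	lock.unlock();
}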