spinlock.h

// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_SYNC_SPINLOCK_H
#define CDSLIB_SYNC_SPINLOCK_H
#include <cds/algo/atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>
#include <utility>  // std::declval, used in the noexcept specifications below
namespace cds {
    /// Synchronization primitives
    namespace sync {

        /// Spin lock
        /**
            Simple and lightweight spin-lock critical section.
            It is useful for guarding access to small (short-lived) sections of code.

            Algorithm:
                TATAS (test-and-test-and-set)
                [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

            No serialization is performed: any of the waiting threads may acquire the spin-lock.
            This spin-lock is NOT recursive: the thread that owns the lock cannot call \p lock() again without deadlock.
            The \p unlock() method may be called by any thread.

            DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
            - the same thread attempts a double lock (deadlock)
            - the lock is released by a thread other than the owner

            If the spin-lock is locked, the \p Backoff algorithm is invoked. The predefined \p backoff::LockDefault
            class yields the current thread and repeats the lock attempt later.

            Template parameters:
            - \p Backoff - backoff strategy, used while the spin-lock is locked (see the usage sketch below)
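
            \b Example (an illustrative sketch, not from the original documentation; \p theLock,
            \p nSharedCounter and \p increment() are hypothetical names):
            \code
            cds::sync::spin_lock< cds::backoff::LockDefault > theLock;
            int nSharedCounter = 0;

            void increment()
            {
                theLock.lock();
                ++nSharedCounter;   // short critical section
                theLock.unlock();
            }
            \endcode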
        */
        template <typename Backoff>
        class spin_lock
        {
        public:
            typedef Backoff backoff_strategy;   ///< back-off strategy type

        private:
            atomics::atomic<bool> m_spin;       ///< Spin
#   ifdef CDS_DEBUG
            typename OS::ThreadId m_dbgOwnerId; ///< Owner thread id (debug mode only)
#   endif

        public:
            /// Construct free (unlocked) spin-lock
            spin_lock() noexcept
#   ifdef CDS_DEBUG
                : m_dbgOwnerId( OS::c_NullThreadId )
#   endif
            {
                m_spin.store( false, atomics::memory_order_release );
            }
            /// Construct spin-lock in specified state
            /**
                In debug mode: if \p bLocked = true then the spin-lock is owned by the current thread
            */
            explicit spin_lock( bool bLocked ) noexcept
#   ifdef CDS_DEBUG
                : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
#   endif
            {
                m_spin.store( bLocked, atomics::memory_order_release );
            }
            /// Dummy copy constructor
            /**
                The ctor initializes the spin-lock to the free (unlocked) state, like the default ctor.
            */
            spin_lock( const spin_lock<Backoff>& ) noexcept
                : m_spin( false )
#   ifdef CDS_DEBUG
                , m_dbgOwnerId( cds::OS::c_NullThreadId )
#   endif
            {
                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
            }
            /// Destructor. In debug mode it checks whether the spin-lock is free
            ~spin_lock()
            {
                assert( !m_spin.load( atomics::memory_order_relaxed ));
                CDS_TSAN_ANNOTATE_MUTEX_DESTROY( this );
            }
            /// Check if the spin-lock is locked
            bool is_locked() const noexcept
            {
                return m_spin.load( atomics::memory_order_relaxed );
            }
            /// Try to lock the object
            /**
                Returns \p true if locking succeeded,
                otherwise (if the spin-lock is already locked) returns \p false.

                Debug version: deadlock can be detected
            */
            bool try_lock() noexcept
            {
#   ifdef CDS_THREAD_SANITIZER_ENABLED
                bool bCurrent = m_spin.exchange( true, atomics::memory_order_acq_rel );
                if ( !bCurrent )
                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( this );
#   else
                bool bCurrent = m_spin.exchange( true, atomics::memory_order_acquire );
#   endif
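                // bCurrent holds the previous value of m_spin: false means the lock
                // was free and this thread has just acquired it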
                CDS_DEBUG_ONLY(
                    if ( !bCurrent ) {
                        m_dbgOwnerId = OS::get_current_thread_id();
                    }
                )
                return !bCurrent;
            }
            /// Try to lock the object, repeat \p nTryCount times if failed
            /**
                Returns \p true if locking succeeded,
                otherwise (if the spin-lock is still locked after \p nTryCount attempts) returns \p false.
            */
            bool try_lock( unsigned int nTryCount ) noexcept( noexcept( backoff_strategy()()))
            {
                backoff_strategy backoff;
                while ( nTryCount-- ) {
                    if ( try_lock())
                        return true;
                    backoff();
                }
                return false;
            }
            /// Lock the spin-lock. Waits indefinitely while the spin-lock is locked. Debug version: deadlock may be detected
            void lock() noexcept( noexcept( backoff_strategy()()))
            {
                backoff_strategy backoff;

                // Deadlock check: the current thread must not already own the lock
                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                assert( m_dbgOwnerId != OS::get_current_thread_id());
                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
                // TATAS algorithm
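                // try_lock() performs the "test-and-set" with an atomic exchange; while it
                // fails, the inner loop merely "tests" with plain loads until the lock looks
                // free, which avoids cache-line ping-pong among the waiting threads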
                while ( !try_lock()) {
                    while ( m_spin.load( atomics::memory_order_acquire ))
                        backoff();
                }
                assert( m_dbgOwnerId == OS::get_current_thread_id());
            }
            /// Unlock the spin-lock. Debug version: unlocking by a thread other than the owner is detected
            void unlock() noexcept
            {
                assert( m_spin.load( atomics::memory_order_relaxed ));
                assert( m_dbgOwnerId == OS::get_current_thread_id());
                CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
                CDS_TSAN_ANNOTATE_MUTEX_RELEASED( this );
                m_spin.store( false, atomics::memory_order_release );
            }
        };
        /// Spin-lock implementation default for the current platform
        typedef spin_lock<backoff::LockDefault> spin;
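        // Example (an illustrative sketch, not part of the original header): since
        // cds::sync::spin provides lock(), try_lock() and unlock(), it satisfies the
        // C++ Lockable requirements and can be used with std::lock_guard (needs <mutex>);
        // theLock and short_critical_section() are hypothetical names:
        //
        //      cds::sync::spin theLock;
        //
        //      void short_critical_section()
        //      {
        //          std::lock_guard<cds::sync::spin> guard( theLock );
        //          // ... code guarded by the spin-lock ...
        //      }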
        /// Recursive spin lock
        /**
            Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.

            Template parameters:
            - \p Integral - one of the integral atomic types: <tt>unsigned int</tt>, \p int, and others
            - \p Backoff - backoff strategy, used while the spin-lock is locked (see the usage sketch below)
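
            \b Example (an illustrative sketch, not from the original documentation; \p theLock,
            \p inner() and \p outer() are hypothetical names):
            \code
            cds::sync::reentrant_spin theLock;

            void inner()
            {
                theLock.lock();     // re-entering from the owner thread just increments the lock count
                // ...
                theLock.unlock();   // decrements the lock count
            }

            void outer()
            {
                theLock.lock();
                inner();            // no deadlock: the lock is recursive
                theLock.unlock();   // the lock is released when the count drops to zero
            }
            \endcode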
        */
        template <typename Integral, class Backoff>
        class reentrant_spin_lock
        {
            typedef OS::ThreadId thread_id;    ///< The type of thread id

        public:
            typedef Integral integral_type;    ///< The integral type
            typedef Backoff  backoff_strategy; ///< The backoff type

        private:
            //@cond
            atomics::atomic<integral_type> m_spin;    ///< spin-lock atomic (lock count)
            atomics::atomic<thread_id>     m_OwnerId; ///< Owner thread id. If the spin-lock is not locked it usually equals \p OS::c_NullThreadId
            //@endcond
        private:
            //@cond
            void take( thread_id tid ) noexcept
            {
                m_OwnerId.store( tid, atomics::memory_order_relaxed );
            }

            void free() noexcept
            {
                m_OwnerId.store( OS::c_NullThreadId, atomics::memory_order_relaxed );
            }

            bool is_taken( thread_id tid ) const noexcept
            {
                return m_OwnerId.load( atomics::memory_order_relaxed ) == tid;
            }

            bool try_taken_lock( thread_id tid ) noexcept
            {
                if ( is_taken( tid )) {
                    // Recursive lock by the owner thread: just increment the lock count
                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                    return true;
                }
                return false;
            }
            bool try_acquire() noexcept
            {
                integral_type nCurrent = 0;
                bool bRet = m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_acquire );
#   ifdef CDS_THREAD_SANITIZER_ENABLED
                if ( bRet )
                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( this );
#   endif
                return bRet;
            }
            bool try_acquire( unsigned int nTryCount ) noexcept( noexcept( backoff_strategy()()))
            {
                backoff_strategy bkoff;
                while ( nTryCount-- ) {
                    if ( try_acquire())
                        return true;
                    bkoff();
                }
                return false;
            }
            void acquire() noexcept( noexcept( backoff_strategy()()))
            {
                // TATAS algorithm
                backoff_strategy bkoff;
                while ( !try_acquire()) {
                    while ( m_spin.load( atomics::memory_order_acquire ))
                        bkoff();
                }
            }
            //@endcond
        public:
            /// Default constructor initializes the spin-lock to the free (unlocked) state
            reentrant_spin_lock() noexcept
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {
                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
            }
            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, a copy constructor is needed.
                Therefore, we provide a dummy copy constructor that performs no copy at all. The ctor
                initializes the spin-lock to the free (unlocked) state, like the default ctor.
            */
            reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) noexcept
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {
                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
            }
            /// Construct object in specified state
            explicit reentrant_spin_lock( bool bLocked )
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {
                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
                if ( bLocked )
                    lock();
            }
            /// Dtor. The spin-lock must be unlocked
            ~reentrant_spin_lock()
            {
                assert( m_spin.load( atomics::memory_order_acquire ) == 0 );
                assert( m_OwnerId.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId );
                CDS_TSAN_ANNOTATE_MUTEX_DESTROY( this );
            }
            /// Checks if the spin-lock is locked
            /**
                The spin-lock is locked if the lock count > 0 and the current thread is not the owner of the lock.
                Otherwise (i.e. the lock count == 0 or the current thread owns the spin-lock) it is unlocked.
            */
            bool is_locked() const noexcept
            {
                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id()));
            }
            /// Try to lock the spin-lock
            bool try_lock() noexcept( noexcept( std::declval<reentrant_spin_lock>().try_acquire()))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ))
                    return true;
                if ( try_acquire()) {
                    take( tid );
                    return true;
                }
                return false;
            }
            /// Try to lock the spin-lock, making up to \p nTryCount attempts
            bool try_lock( unsigned int nTryCount ) noexcept( noexcept( std::declval<reentrant_spin_lock>().try_acquire( nTryCount )))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ))
                    return true;
                if ( try_acquire( nTryCount )) {
                    take( tid );
                    return true;
                }
                return false;
            }
            /// Lock the object; waits if it is busy
            void lock() noexcept( noexcept( std::declval<reentrant_spin_lock>().acquire()))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( !try_taken_lock( tid )) {
                    acquire();
                    take( tid );
                }
            }
            /// Unlock the spin-lock
            void unlock() noexcept
            {
                assert( is_taken( OS::get_current_thread_id()));
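                // Only the owner thread modifies the lock count while holding the lock,
                // so the decrement can use relaxed ordering; the final release store
                // below publishes the critical section to the next owner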
                integral_type n = m_spin.load( atomics::memory_order_relaxed );
                if ( n > 1 )
                    m_spin.store( n - 1, atomics::memory_order_relaxed );
                else {
                    free();
                    CDS_TSAN_ANNOTATE_MUTEX_RELEASED( this );
                    m_spin.store( 0, atomics::memory_order_release );
                }
            }
            /// Change the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
            void change_owner( OS::ThreadId newOwnerId ) noexcept
            {
                assert( is_taken( OS::get_current_thread_id()));
                assert( newOwnerId != OS::c_NullThreadId );
                m_OwnerId.store( newOwnerId, atomics::memory_order_relaxed );
            }
        };
        /// Recursive 32bit spin-lock
        typedef reentrant_spin_lock<uint32_t, backoff::LockDefault> reentrant_spin32;

        /// Default recursive spin-lock
        typedef reentrant_spin32 reentrant_spin;

        /// Recursive 64bit spin-lock
        typedef reentrant_spin_lock<uint64_t, backoff::LockDefault> reentrant_spin64;

    } // namespace sync
} // namespace cds

#endif // #ifndef CDSLIB_SYNC_SPINLOCK_H