// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/opt/options.h> // opt::none

namespace cds { namespace sync {

    /// \p pool_monitor traits
    struct pool_monitor_traits {

        /// Dummy internal statistics if \p Stat template parameter is \p false
        struct empty_stat
        {
            //@cond
            void onLock()             const {}
            void onUnlock()           const {}
            void onLockContention()   const {}
            void onUnlockContention() const {}
            void onLockAllocation()   const {}
            void onLockDeallocation() const {}
            //@endcond
        };

        /// Monitor's internal statistics, used if \p Stat template parameter is \p true
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter event_counter; ///< measure type

            event_counter m_nLockCount;         ///< Number of monitor \p lock() calls
            event_counter m_nUnlockCount;       ///< Number of monitor \p unlock() calls
            event_counter m_nMaxLocked;         ///< Maximum number of simultaneously locked mutexes
            event_counter m_nLockContention;    ///< Number of \p lock() contentions
            event_counter m_nUnlockContention;  ///< Number of \p unlock() contentions
            event_counter m_nLockAllocation;    ///< Number of lock allocations from the pool
            event_counter m_nLockDeallocation;  ///< Number of lock deallocations
            event_counter m_nMaxAllocated;      ///< Maximum number of simultaneously allocated mutexes

            //@cond
            void onLock()
            {
                ++m_nLockCount;
                int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
                if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxLocked = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onUnlock()           { ++m_nUnlockCount; }
            void onLockContention()   { ++m_nLockContention; }
            void onUnlockContention() { ++m_nUnlockContention; }
            void onLockAllocation()
            {
                ++m_nLockAllocation;
                int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
                if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onLockDeallocation() { ++m_nLockDeallocation; }
            //@endcond
        };
    };

    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock only when needed
    /**
        The monitor is intended to reduce the number of system mutexes used by
        huge containers such as trees. The monitor allocates a mutex from the pool \p LockPool
        only when a container node actually needs to be locked. The lifetime of a node's mutex
        is managed by a reference counter: when the reference counter drops to zero,
        the mutex is returned to the pool.

        The monitor is blocking: access to the node's mutex is protected by a spin-lock.
        However, locking and unlocking the node's mutex itself is performed outside that spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or similar. Access to the pool is not synchronized.
        - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::Default
        - \p Stat - enable (\p true) or disable (\p false, the default) the monitor's internal statistics.

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
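
        For illustration, here is a slightly fuller sketch. The node type \p my_node below is
        hypothetical; the only assumption is that the node exposes a member named
        \p m_SyncMonitorInjection of type \p node_injection, which is what \p lock() and
        \p unlock() access:
        \code
        #include <cds/sync/pool_monitor.h>
        #include <cds/memory/vyukov_queue_pool.h>
        #include <mutex>

        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;

        // Hypothetical container node; m_SyncMonitorInjection is the member name
        // the monitor expects
        struct my_node {
            int m_nValue;
            sync_monitor::node_injection m_SyncMonitorInjection;
        };

        void example()
        {
            sync_monitor mon;   // pool of 256 preallocated mutexes
            my_node node;

            {
                // RAII locking: a mutex is taken from the pool on first use of the node
                sync_monitor::scoped_lock<my_node> guard( mon, node );
                node.m_nValue = 42;
            }   // unlocked here; the mutex is returned to the pool
                // once the node's reference counter drops to zero
        }
        \endcode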
    */
    template <class LockPool, typename BackOff = cds::backoff::Default, bool Stat = false >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type; ///< Pool type
        typedef typename pool_type::value_type lock_type; ///< node lock type

        typedef typename std::conditional<
            std::is_same< BackOff, cds::opt::none >::value,
            cds::backoff::yield,
            BackOff
        >::type back_off; ///< back-off strategy for spinning

        typedef uint32_t refspin_type; ///< Reference counter + spin-lock bit

        /// Internal statistics
        typedef typename std::conditional<
            Stat,
            typename pool_monitor_traits::stat<>,
            typename pool_monitor_traits::empty_stat
        >::type internal_stat;

        /// Pool's default capacity
        static constexpr size_t const c_nDefaultCapacity = 256;

    private:
        //@cond
        // Bit 0 of refspin_type is the spin-lock bit; the remaining bits hold
        // the reference counter, hence the counter is incremented in steps of 2.
        static constexpr refspin_type const c_nSpinBit      = 1;
        static constexpr refspin_type const c_nRefIncrement = 2;

        mutable pool_type     m_Pool;
        mutable internal_stat m_Stat;
        //@endcond

    public:
        /// Node injection
        struct node_injection
        {
            mutable atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                   m_pLock;   ///< Node-level lock

            //@cond
            node_injection()
                : m_pLock( nullptr )
            {
                m_RefSpin.store( 0, atomics::memory_order_release );
            }

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }

            bool check_free() const
            {
                return m_pLock == nullptr && m_RefSpin.load( atomics::memory_order_relaxed ) == 0;
            }
            //@endcond
        };

        /// Initializes the pool with \p c_nDefaultCapacity (256) preallocated mutexes
        pool_monitor()
            : m_Pool( c_nDefaultCapacity )
        {}

        /// Initializes the pool with \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity ? nPoolCapacity : c_nDefaultCapacity )
        {}

        /// Acquires exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;

            m_Stat.onLock();

            // Try to acquire the spin-lock bit and increment the reference counter
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acq_rel, atomics::memory_order_acquire ))
            {
                back_off bkoff;
                do {
                    m_Stat.onLockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
            }

            // Spin-lock acquired
            // If the node has no lock yet, allocate one from the pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock ) {
                assert( cur == 0 );
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
                assert( pLock != nullptr );
                m_Stat.onLockAllocation();
            }

            // Release the spin-lock, keeping the incremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // Lock the node
            pLock->lock();
        }

        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            m_Stat.onUnlock();

            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // Try to acquire the spin-lock bit
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_acquire ))
            {
                back_off bkoff;
                do {
                    m_Stat.onUnlockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_acquire ));
            }

            // Spin-lock acquired
            // If we are the unique owner, detach the lock for deallocation
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // Release the spin-lock and decrement the reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // Return the detached lock to the pool
            if ( pLock ) {
                m_Pool.deallocate( pLock, 1 );
                m_Stat.onLockDeallocation();
            }
        }

        /// Scoped lock
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;

        /// Returns the reference to internal statistics
        /**
            If the class' template argument \p Stat is \p false,
            the function returns \ref pool_monitor_traits::empty_stat "dummy statistics".
            Otherwise, it returns the reference to the monitor's internal statistics
            of type \ref pool_monitor_traits::stat.
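
            A minimal sketch of reading the counters (the \p get() accessor is the same one
            \p pool_monitor_traits::stat uses internally; the rest is illustrative):
            \code
            typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
            // The third template argument enables statistics gathering
            typedef cds::sync::pool_monitor< pool_type, cds::backoff::Default, true > sync_monitor;

            sync_monitor mon;
            // ... lock()/unlock() container nodes via sync_monitor::scoped_lock ...

            auto nLocks     = mon.statistics().m_nLockCount.get();
            auto nAllocated = mon.statistics().m_nMaxAllocated.get();
            \endcode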
        */
        internal_stat const& statistics() const
        {
            return m_Stat;
        }
    };

}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H