// push_pop.cpp
  1. // Copyright (c) 2006-2018 Maxim Khizhinsky
  2. //
  3. // Distributed under the Boost Software License, Version 1.0. (See accompanying
  4. // file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
  5. #include "stack_type.h"
  6. namespace {
// Test configuration; defaults are overridden in SetUpTestCase() from the
// "Stack_PushPop" section of the external test config.
static size_t s_nPushThreadCount = 4;        // number of producer threads
static size_t s_nPopThreadCount = 4;         // number of consumer threads
static size_t s_nStackSize = 1000000;        // total items pushed (rounded down to a multiple of the producer count in test())
static size_t s_nEliminationSize = 4;        // elimination back-off array size (used by elimination-stack variants)

// Count of producers still running. Producers decrement it (release) when
// done; consumers poll it (acquire) to decide when to stop draining the stack.
static atomics::atomic<size_t> s_nWorkingProducers( 0 );
// Push/pop stress test: s_nPushThreadCount producers concurrently push a total
// of s_nStackSize items while s_nPopThreadCount consumers pop them. After the
// run, analyze() reconciles per-value push/pop histograms to verify that every
// item pushed was popped exactly once.
class stack_push_pop : public cds_test::stress_fixture
{
protected:
    // Discriminator stored in the cds_test::thread base so analyze() can tell
    // producers and consumers apart when walking the thread pool.
    enum thread_type
    {
        producer_thread,
        consumer_thread
    };

    // Item type stored in the stack under test.
    struct value_type {
        size_t nNo;       // value index, always in [0, c_nValArraySize)
        size_t nThread;   // id of the producer thread that pushed the item

        value_type()
            : nNo( 0 )
            , nThread( 0 )
        {}
        value_type( size_t n )
            : nNo( n )
            , nThread( 0 )
        {}
    };

    // Size of the per-thread push/pop histograms; producers fold the item
    // counter into this range (i % c_nValArraySize).
    static size_t const c_nValArraySize = 1024;

    // Producer thread: pushes m_nItemCount items, counting successful pushes
    // per value and failed pushes overall.
    template <class Stack>
    class Producer: public cds_test::thread
    {
        typedef cds_test::thread base_class;
    public:
        Producer( cds_test::thread_pool& pool, Stack& stack, size_t push_count )
            : base_class( pool, producer_thread )
            , m_stack( stack )
            , m_nItemCount( push_count )
            , m_nPushError( 0 )
        {}
        // Copy ctor used by clone(); counters start fresh for the new thread.
        Producer( Producer& src )
            : base_class( src )
            , m_stack( src.m_stack )
            , m_nItemCount( src.m_nItemCount )
            , m_nPushError( 0 )
        {}

        virtual thread * clone()
        {
            return new Producer( *this );
        }

        virtual void test()
        {
            memset( m_arrPush, 0, sizeof( m_arrPush ));

            value_type v;
            v.nThread = id();
            for ( size_t i = 0; i < m_nItemCount; ++i ) {
                v.nNo = i % c_nValArraySize;
                if ( m_stack.push( v ))
                    ++m_arrPush[v.nNo];
                else
                    ++m_nPushError;
            }

            // Announce completion; the release store pairs with the acquire
            // load in Consumer::test() so consumers see all pushed items
            // before observing the counter reach zero.
            s_nWorkingProducers.fetch_sub( 1, atomics::memory_order_release );
        }

    public:
        Stack&       m_stack;
        size_t const m_nItemCount;                 // number of items this producer must push
        size_t       m_nPushError;                 // failed push() calls
        size_t       m_arrPush[c_nValArraySize];   // successful pushes, per value
    };

    // Consumer thread: pops until every producer has finished AND the stack
    // is drained, building a per-value pop histogram.
    template <class Stack>
    class Consumer : public cds_test::thread
    {
        typedef cds_test::thread base_class;
    public:
        Consumer( cds_test::thread_pool& pool, Stack& stack )
            : base_class( pool, consumer_thread )
            , m_stack( stack )
            , m_nPopCount( 0 )
            , m_nPopEmpty( 0 )
            , m_nDirtyPop( 0 )
        {}
        // Copy ctor used by clone(); counters start fresh for the new thread.
        Consumer( Consumer& src )
            : base_class( src )
            , m_stack( src.m_stack )
            , m_nPopCount( 0 )
            , m_nPopEmpty( 0 )
            , m_nDirtyPop( 0 )
        {}

        virtual thread * clone()
        {
            return new Consumer( *this );
        }

        virtual void test()
        {
            memset( m_arrPop, 0, sizeof( m_arrPop ));

            value_type v;
            // Keep popping while any producer is still running or the stack
            // still holds items. The acquire load synchronizes with the
            // producers' release decrement, so once both conditions hold no
            // further items can appear.
            while ( !( s_nWorkingProducers.load( atomics::memory_order_acquire ) == 0 && m_stack.empty())) {
                if ( m_stack.pop( v )) {
                    ++m_nPopCount;
                    if ( v.nNo < sizeof( m_arrPop ) / sizeof( m_arrPop[0] ))
                        ++m_arrPop[v.nNo];
                    else
                        ++m_nDirtyPop;   // value outside the valid range: corruption
                }
                else
                    ++m_nPopEmpty;       // transient empty while producers race ahead
            }
        }

    public:
        Stack& m_stack;
        size_t m_nPopCount;                 // successful pop() calls
        size_t m_nPopEmpty;                 // pop() calls that found the stack empty
        size_t m_arrPop[c_nValArraySize];   // successful pops, per value
        size_t m_nDirtyPop;                 // pops that returned an out-of-range value
    };

protected:
    // Read thread counts and sizes from the "Stack_PushPop" config section,
    // clamping thread counts to at least 1.
    static void SetUpTestCase()
    {
        cds_test::config const& cfg = get_config("Stack_PushPop");

        s_nPushThreadCount = cfg.get_size_t( "PushThreadCount", s_nPushThreadCount );
        s_nPopThreadCount = cfg.get_size_t( "PopThreadCount", s_nPopThreadCount );
        s_nStackSize = cfg.get_size_t( "StackSize", s_nStackSize );
        s_nEliminationSize = cfg.get_size_t( "EliminationSize", s_nEliminationSize );

        if ( s_nPushThreadCount == 0 )
            s_nPushThreadCount = 1;
        if ( s_nPopThreadCount == 0 )
            s_nPopThreadCount = 1;
    }
    //static void TearDownTestCase();

    // Run the stress test against \p stack: spawn producers/consumers,
    // run the pool, then validate counters and emit statistics.
    template <typename Stack>
    void test( Stack& stack )
    {
        cds_test::thread_pool& pool = get_pool();

        size_t const nPushCount = s_nStackSize / s_nPushThreadCount;
        pool.add( new Producer<Stack>( pool, stack, nPushCount ), s_nPushThreadCount );
        pool.add( new Consumer<Stack>( pool, stack ), s_nPopThreadCount );
        // Threads are not started until pool.run(), so initializing the
        // producer counter here is race-free.
        s_nWorkingProducers.store( s_nPushThreadCount );
        // Round the total down to an exact multiple of the producer count so
        // the bookkeeping below matches what was actually pushed.
        s_nStackSize = nPushCount * s_nPushThreadCount;

        propout() << std::make_pair( "producer_thread_count", s_nPushThreadCount )
            << std::make_pair( "consumer_thread_count", s_nPopThreadCount )
            << std::make_pair( "push_count", s_nStackSize )
            ;

        std::chrono::milliseconds duration = pool.run();

        propout() << std::make_pair( "duration", duration );

        analyze( stack );

        propout() << stack.statistics();
    }

    // Variant for elimination stacks: run the common test, then verify the
    // elimination back-off statistics are self-consistent.
    template <typename Stack>
    void test_elimination( Stack& stack )
    {
        test( stack );
        check_elimination_stat( stack.statistics());
    }

    // Overload for stacks compiled without statistics: nothing to check.
    void check_elimination_stat( cds::container::treiber_stack::empty_stat const& )
    {}

    // With statistics enabled: every push/pop either hit the stack proper or
    // was matched through elimination, and each active collision on one side
    // must correspond to a passive collision on the other.
    void check_elimination_stat( cds::container::treiber_stack::stat<> const& s )
    {
        EXPECT_EQ( s.m_PushCount.get() + s.m_ActivePushCollision.get() + s.m_PassivePushCollision.get(), s_nStackSize );
        EXPECT_EQ( s.m_PopCount.get() + s.m_ActivePopCollision.get() + s.m_PassivePopCollision.get(), s_nStackSize );
        EXPECT_EQ( s.m_PushCount.get(), s.m_PopCount.get());
        EXPECT_EQ( s.m_ActivePopCollision.get(), s.m_PassivePushCollision.get());
        EXPECT_EQ( s.m_ActivePushCollision.get(), s.m_PassivePopCollision.get());
    }

    // Aggregate per-thread counters and verify: total pops equal total
    // pushes, no out-of-range values were popped, and per-value push and
    // pop counts cancel exactly.
    template< class Stack>
    void analyze( Stack& /*stack*/ )
    {
        cds_test::thread_pool& pool = get_pool();

        size_t nPushError = 0;
        size_t nPopEmpty = 0;
        size_t nPopCount = 0;
        // arrVal[i] accumulates (pushes of value i) - (pops of value i);
        // every entry must end at zero. Transient size_t wrap-around while
        // subtracting is harmless since only the final value is checked.
        size_t arrVal[c_nValArraySize];
        memset( arrVal, 0, sizeof( arrVal ));
        size_t nDirtyPop = 0;

        for ( size_t threadNo = 0; threadNo < pool.size(); ++threadNo ) {
            cds_test::thread& thread = pool.get( threadNo );
            if ( thread.type() == producer_thread ) {
                Producer<Stack>& producer = static_cast<Producer<Stack>&>( thread );

                nPushError += producer.m_nPushError;
                for ( size_t i = 0; i < c_nValArraySize; ++i )
                    arrVal[i] += producer.m_arrPush[i];
            }
            else {
                ASSERT_EQ( thread.type(), consumer_thread );
                Consumer<Stack>& consumer = static_cast<Consumer<Stack>&>(thread);

                nPopEmpty += consumer.m_nPopEmpty;
                nPopCount += consumer.m_nPopCount;
                nDirtyPop += consumer.m_nDirtyPop;
                for ( size_t i = 0; i < c_nValArraySize; ++i )
                    arrVal[i] -= consumer.m_arrPop[i];
            }
        }

        EXPECT_EQ( nPopCount, s_nStackSize );
        EXPECT_EQ( nDirtyPop, 0u );
        for ( size_t i = 0; i < sizeof( arrVal ) / sizeof( arrVal[0] ); ++i ) {
            EXPECT_EQ( arrVal[i], 0u );
        }

        propout() << std::make_pair( "push_count", s_nStackSize )
            << std::make_pair( "push_error", nPushError )
            << std::make_pair( "pop_empty", nPopEmpty )
            << std::make_pair( "dirty_pop", nDirtyPop )
            ;
    }
};
// Instantiate the fixture's test() / test_elimination() for every stack
// implementation declared in stack_type.h: Treiber stack, elimination
// back-off stack, flat-combining stack/deque, and std::stack adapters.
CDSSTRESS_TreiberStack( stack_push_pop )
CDSSTRESS_EliminationStack( stack_push_pop )
CDSSTRESS_FCStack( stack_push_pop )
CDSSTRESS_FCDeque( stack_push_pop )
CDSSTRESS_StdStack( stack_push_pop )
  213. } // namespace