pipeline.h 24 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689
  1. /*
  2. Copyright (c) 2005-2020 Intel Corporation
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. #ifndef __TBB_pipeline_H
  14. #define __TBB_pipeline_H
  15. #define __TBB_pipeline_H_include_area
  16. #include "internal/_warning_suppress_enable_notice.h"
  17. #include "atomic.h"
  18. #include "task.h"
  19. #include "tbb_allocator.h"
  20. #include <cstddef>
  21. #if __TBB_CPP11_TYPE_PROPERTIES_PRESENT
  22. #include <type_traits>
  23. #endif
  24. namespace tbb {
  25. class pipeline;
  26. class filter;
  27. //! @cond INTERNAL
  28. namespace internal {
  29. // The argument for PIPELINE_VERSION should be an integer between 2 and 9
  30. #define __TBB_PIPELINE_VERSION(x) ((unsigned char)(x-2)<<1)
  31. typedef unsigned long Token;
  32. typedef long tokendiff_t;
  33. class stage_task;
  34. class input_buffer;
  35. class pipeline_root_task;
  36. class pipeline_cleaner;
  37. } // namespace internal
  38. namespace interface6 {
  39. template<typename T, typename U> class filter_t;
  40. namespace internal {
  41. class pipeline_proxy;
  42. }
  43. }
  44. //! @endcond
//! A stage in a pipeline.
/** Abstract base class for pipeline stages: derive from it and implement
    operator() to transform items.  Mode, version, and behavior flags are all
    packed into the single byte my_filter_mode (see the bit constants below).
    @ingroup algorithms */
class filter: internal::no_copy {
private:
    //! Value used to mark "not in pipeline"
    // Sentinel address (-1) distinguishes "never added to a pipeline" from
    // "end of the filter chain" (which is NULL).
    static filter* not_in_pipeline() { return reinterpret_cast<filter*>(intptr_t(-1)); }
protected:
    //! The lowest bit 0 is for parallel vs. serial
    static const unsigned char filter_is_serial = 0x1;
    //! 4th bit distinguishes ordered vs unordered filters.
    /** The bit was not set for parallel filters in TBB 2.1 and earlier,
        but is_ordered() function always treats parallel filters as out of order. */
    static const unsigned char filter_is_out_of_order = 0x1<<4;
    //! 5th bit distinguishes thread-bound and regular filters.
    static const unsigned char filter_is_bound = 0x1<<5;
    //! 6th bit marks input filters emitting small objects
    static const unsigned char filter_may_emit_null = 0x1<<6;
    //! 7th bit defines exception propagation mode expected by the application.
    // Clear when captured-exception mode is compiled in; set to request exact
    // exception propagation across threads.
    static const unsigned char exact_exception_propagation =
#if TBB_USE_CAPTURED_EXCEPTION
    0x0;
#else
    0x1<<7;
#endif /* TBB_USE_CAPTURED_EXCEPTION */
    //! Header version, encoded into bits 1-3 of every mode value so the
    //! library can check at run time which header a filter was built against.
    static const unsigned char current_version = __TBB_PIPELINE_VERSION(5);
    static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for version
public:
    enum mode {
        //! processes multiple items in parallel and in no particular order
        parallel = current_version | filter_is_out_of_order,
        //! processes items one at a time; all such filters process items in the same order
        serial_in_order = current_version | filter_is_serial,
        //! processes items one at a time and in no particular order
        serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order,
        //! @deprecated use serial_in_order instead
        serial = serial_in_order
    };
protected:
    //! Construct a serial-in-order (true) or parallel (false) filter.
    // Legacy bool interface kept for compatibility with pre-mode TBB code.
    explicit filter( bool is_serial_ ) :
        next_filter_in_pipeline(not_in_pipeline()),
        my_input_buffer(NULL),
        my_filter_mode(static_cast<unsigned char>((is_serial_ ? serial : parallel) | exact_exception_propagation)),
        prev_filter_in_pipeline(not_in_pipeline()),
        my_pipeline(NULL),
        next_segment(NULL)
    {}
    //! Construct a filter with an explicitly chosen execution mode.
    explicit filter( mode filter_mode ) :
        next_filter_in_pipeline(not_in_pipeline()),
        my_input_buffer(NULL),
        my_filter_mode(static_cast<unsigned char>(filter_mode | exact_exception_propagation)),
        prev_filter_in_pipeline(not_in_pipeline()),
        my_pipeline(NULL),
        next_segment(NULL)
    {}
    // signal end-of-input for concrete_filters
    void __TBB_EXPORTED_METHOD set_end_of_input();
public:
    //! True if filter is serial.
    bool is_serial() const {
        return bool( my_filter_mode & filter_is_serial );
    }
    //! True if filter must receive stream in order.
    bool is_ordered() const {
        // Serial AND not flagged out-of-order; parallel filters never qualify.
        return (my_filter_mode & (filter_is_out_of_order|filter_is_serial))==filter_is_serial;
    }
    //! True if filter is thread-bound.
    bool is_bound() const {
        return ( my_filter_mode & filter_is_bound )==filter_is_bound;
    }
    //! true if an input filter can emit null
    bool object_may_be_null() {
        return ( my_filter_mode & filter_may_emit_null ) == filter_may_emit_null;
    }
    //! Operate on an item from the input stream, and return item for output stream.
    /** Returns NULL if filter is a sink. */
    virtual void* operator()( void* item ) = 0;
    //! Destroy filter.
    /** If the filter was added to a pipeline, the pipeline must be destroyed first. */
    virtual __TBB_EXPORTED_METHOD ~filter();
#if __TBB_TASK_GROUP_CONTEXT
    //! Destroys item if pipeline was cancelled.
    /** Required to prevent memory leaks.
        Note it can be called concurrently even for serial filters.*/
    virtual void finalize( void* /*item*/ ) {}
#endif
private:
    //! Pointer to next filter in the pipeline.
    filter* next_filter_in_pipeline;
    //! has the filter not yet processed all the tokens it will ever see?
    // (pipeline has not yet reached end_of_input or this filter has not yet
    // seen the last token produced by input_filter)
    bool has_more_work();
    //! Buffer for incoming tokens, or NULL if not required.
    /** The buffer is required if the filter is serial or follows a thread-bound one. */
    internal::input_buffer* my_input_buffer;
    friend class internal::stage_task;
    friend class internal::pipeline_root_task;
    friend class pipeline;
    friend class thread_bound_filter;
    //! Storage for filter mode and dynamically checked implementation version.
    const unsigned char my_filter_mode;
    //! Pointer to previous filter in the pipeline.
    filter* prev_filter_in_pipeline;
    //! Pointer to the pipeline.
    pipeline* my_pipeline;
    //! Pointer to the next "segment" of filters, or NULL if not required.
    /** In each segment, the first filter is not thread-bound but follows a thread-bound one. */
    filter* next_segment;
};
//! A stage in a pipeline served by a user thread.
/** Unlike regular filters, a thread_bound_filter is never executed by TBB
    worker threads; the owning thread must drive it by repeatedly calling
    process_item() or try_process_item().
    @ingroup algorithms */
class thread_bound_filter: public filter {
public:
    //! Outcome of one processing attempt.
    enum result_type {
        // item was processed
        success,
        // item is currently not available
        item_not_available,
        // there are no more items to process
        end_of_stream
    };
protected:
    //! Construct with the given mode; the serial bit is mandatory and the
    //! thread-bound bit is forced on.
    explicit thread_bound_filter(mode filter_mode):
        filter(static_cast<mode>(filter_mode | filter::filter_is_bound))
    {
        __TBB_ASSERT(filter_mode & filter::filter_is_serial, "thread-bound filters must be serial");
    }
public:
    //! If a data item is available, invoke operator() on that item.
    /** This interface is non-blocking.
        Returns 'success' if an item was processed.
        Returns 'item_not_available' if no item can be processed now
        but more may arrive in the future, or if token limit is reached.
        Returns 'end_of_stream' if there are no more items to process. */
    result_type __TBB_EXPORTED_METHOD try_process_item();
    //! Wait until a data item becomes available, and invoke operator() on that item.
    /** This interface is blocking.
        Returns 'success' if an item was processed.
        Returns 'end_of_stream' if there are no more items to process.
        Never returns 'item_not_available', as it blocks until another return condition applies. */
    result_type __TBB_EXPORTED_METHOD process_item();
private:
    //! Internal routine for item processing
    // Shared implementation of process_item (blocking) and try_process_item.
    result_type internal_process_item(bool is_blocking);
};
//! A processing pipeline that applies filters to items.
/** Filters are appended with add_filter() and executed by run(); the class
    only declares state here — the implementation lives in the TBB library
    (all public entry points are __TBB_EXPORTED_METHOD).
    @ingroup algorithms */
class __TBB_DEPRECATED_MSG("tbb::pipeline is deprecated, use tbb::parallel_pipeline") pipeline {
public:
    //! Construct empty pipeline.
    __TBB_EXPORTED_METHOD pipeline();
    /** Though the current implementation declares the destructor virtual, do not rely on this
        detail. The virtualness is deprecated and may disappear in future versions of TBB. */
    virtual __TBB_EXPORTED_METHOD ~pipeline();
    //! Add filter to end of pipeline.
    void __TBB_EXPORTED_METHOD add_filter( filter& filter_ );
    //! Run the pipeline to completion.
    void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens );
#if __TBB_TASK_GROUP_CONTEXT
    //! Run the pipeline to completion with user-supplied context.
    void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context& context );
#endif
    //! Remove all filters from the pipeline.
    void __TBB_EXPORTED_METHOD clear();
private:
    friend class internal::stage_task;
    friend class internal::pipeline_root_task;
    friend class filter;
    friend class thread_bound_filter;
    friend class internal::pipeline_cleaner;
    friend class tbb::interface6::internal::pipeline_proxy;
    //! Pointer to first filter in the pipeline.
    filter* filter_list;
    //! Pointer to location where address of next filter to be added should be stored.
    filter* filter_end;
    //! Task whose reference count is used to determine when all stages are done.
    task* end_counter;
    //! Number of idle tokens waiting for input stage.
    atomic<internal::Token> input_tokens;
    //! Global counter of tokens
    atomic<internal::Token> token_counter;
    //! False until fetch_input returns NULL.
    bool end_of_input;
    //! True if the pipeline contains a thread-bound filter; false otherwise.
    bool has_thread_bound_filters;
    //! Remove filter from pipeline.
    void remove_filter( filter& filter_ );
    //! Not used, but retained to satisfy old export files.
    void __TBB_EXPORTED_METHOD inject_token( task& self );
#if __TBB_TASK_GROUP_CONTEXT
    //! Does clean up if pipeline is cancelled or exception occurred
    void clear_filters();
#endif
};
  239. //------------------------------------------------------------------------
  240. // Support for lambda-friendly parallel_pipeline interface
  241. //------------------------------------------------------------------------
  242. namespace flow {
  243. namespace interface11 {
  244. template<typename Output> class input_node;
  245. }
  246. }
  247. namespace interface6 {
  248. namespace internal {
  249. template<typename T, typename U, typename Body> class concrete_filter;
  250. }
  251. //! input_filter control to signal end-of-input for parallel_pipeline
  252. class flow_control {
  253. bool is_pipeline_stopped;
  254. flow_control() { is_pipeline_stopped = false; }
  255. template<typename T, typename U, typename Body> friend class internal::concrete_filter;
  256. template<typename Output> friend class flow::interface11::input_node;
  257. public:
  258. void stop() { is_pipeline_stopped = true; }
  259. };
  260. //! @cond INTERNAL
  261. namespace internal {
// Emulate std::is_trivially_copyable (false positives not allowed, false negatives suboptimal but safe).
#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT
// C++11 toolchain: defer to the real trait.
template<typename T> struct tbb_trivially_copyable { enum { value = std::is_trivially_copyable<T>::value }; };
#else
// Pre-C++11 fallback: default to false (safe), then whitelist pointers and
// the built-in arithmetic types, which are trivially copyable by definition.
template<typename T> struct tbb_trivially_copyable { enum { value = false }; };
template<typename T> struct tbb_trivially_copyable < T* > { enum { value = true }; };
template<> struct tbb_trivially_copyable < bool > { enum { value = true }; };
template<> struct tbb_trivially_copyable < char > { enum { value = true }; };
template<> struct tbb_trivially_copyable < signed char > { enum { value = true }; };
template<> struct tbb_trivially_copyable <unsigned char > { enum { value = true }; };
template<> struct tbb_trivially_copyable < short > { enum { value = true }; };
template<> struct tbb_trivially_copyable <unsigned short > { enum { value = true }; };
template<> struct tbb_trivially_copyable < int > { enum { value = true }; };
template<> struct tbb_trivially_copyable <unsigned int > { enum { value = true }; };
template<> struct tbb_trivially_copyable < long > { enum { value = true }; };
template<> struct tbb_trivially_copyable <unsigned long > { enum { value = true }; };
template<> struct tbb_trivially_copyable < long long> { enum { value = true }; };
template<> struct tbb_trivially_copyable <unsigned long long> { enum { value = true }; };
template<> struct tbb_trivially_copyable < float > { enum { value = true }; };
template<> struct tbb_trivially_copyable < double > { enum { value = true }; };
template<> struct tbb_trivially_copyable < long double > { enum { value = true }; };
#if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED)
// wchar_t is a distinct native type here; when MSVC treats it as a typedef of
// unsigned short, this specialization would be a duplicate, hence the guard.
template<> struct tbb_trivially_copyable < wchar_t > { enum { value = true }; };
#endif /* !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) */
#endif // tbb_trivially_copyable
  287. template<typename T>
  288. struct use_allocator {
  289. enum { value = sizeof(T) > sizeof(void *) || !tbb_trivially_copyable<T>::value };
  290. };
// A helper class to customize how a type is passed between filters.
// Usage: token_helper<T, use_allocator<T>::value>
// Primary template is never defined; the specializations below cover
// heap-token, raw-pointer, and packed-in-void* passing.
template<typename T, bool Allocate> class token_helper;
// using tbb_allocator
// Specialization for types that must travel as heap-allocated tokens:
// each token is a T constructed in tbb_allocator storage and destroyed
// once the consuming filter is done with it.
template<typename T>
class token_helper<T, true> {
public:
    typedef typename tbb::tbb_allocator<T> allocator;
    typedef T* pointer;
    typedef T value_type;
#if __TBB_CPP11_RVALUE_REF_PRESENT
    static pointer create_token(value_type && source)
#else
    static pointer create_token(const value_type & source)
#endif
    {
        // Allocate raw storage, then placement-construct the token from the
        // (possibly moved-from) source value.
        pointer output_t = allocator().allocate(1);
        return new (output_t) T(tbb::internal::move(source));
    }
    static value_type & token(pointer & t) { return *t; }
    static void * cast_to_void_ptr(pointer ref) { return (void *) ref; }
    static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; }
    static void destroy_token(pointer token) {
        // Mirror of create_token: run the destructor, then release storage.
        allocator().destroy(token);
        allocator().deallocate(token,1);
    }
};
  318. // pointer specialization
  319. template<typename T>
  320. class token_helper<T*, false> {
  321. public:
  322. typedef T* pointer;
  323. typedef T* value_type;
  324. static pointer create_token(const value_type & source) { return source; }
  325. static value_type & token(pointer & t) { return t; }
  326. static void * cast_to_void_ptr(pointer ref) { return (void *)ref; }
  327. static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; }
  328. static void destroy_token( pointer /*token*/) {}
  329. };
// converting type to and from void*, passing objects directly
// Specialization for small trivially-copyable types: the value itself is
// smuggled through the pipeline's void* slot via a union overlay (deliberate
// type punning; safe here because use_allocator guarantees sizeof(T) <=
// sizeof(void*) and trivial copyability).
template<typename T>
class token_helper<T, false> {
    typedef union {
        T actual_value;
        void * void_overlay;
    } type_to_void_ptr_map;
public:
    typedef T pointer; // not really a pointer in this case.
    typedef T value_type;
    static pointer create_token(const value_type & source) { return source; }
    static value_type & token(pointer & t) { return t; }
    static void * cast_to_void_ptr(pointer ref) {
        type_to_void_ptr_map mymap;
        // Pre-zero the full pointer so bytes beyond sizeof(T) are deterministic.
        mymap.void_overlay = NULL;
        mymap.actual_value = ref;
        return mymap.void_overlay;
    }
    static pointer cast_from_void_ptr(void * ref) {
        type_to_void_ptr_map mymap;
        mymap.void_overlay = ref;
        return mymap.actual_value;
    }
    static void destroy_token( pointer /*token*/) {}
};
// intermediate
// Adapter that wraps a user body taking T and returning U as a tbb::filter
// stage.  Tokens are unpacked/repacked via the token_helper policies chosen
// by use_allocator for each of T and U.
template<typename T, typename U, typename Body>
class concrete_filter: public tbb::filter {
    const Body& my_body;
    typedef token_helper<T,use_allocator<T>::value> t_helper;
    typedef typename t_helper::pointer t_pointer;
    typedef token_helper<U,use_allocator<U>::value> u_helper;
    typedef typename u_helper::pointer u_pointer;
    //! Unpack the incoming token, run the body, repack the result;
    //! the consumed input token is destroyed here.
    void* operator()(void* input) __TBB_override {
        t_pointer temp_input = t_helper::cast_from_void_ptr(input);
        u_pointer output_u = u_helper::create_token(my_body(tbb::internal::move(t_helper::token(temp_input))));
        t_helper::destroy_token(temp_input);
        return u_helper::cast_to_void_ptr(output_u);
    }
    //! Dispose of an unprocessed token when the pipeline is cancelled.
    void finalize(void * input) __TBB_override {
        t_pointer temp_input = t_helper::cast_from_void_ptr(input);
        t_helper::destroy_token(temp_input);
    }
public:
    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
};
// input
// Specialization for the input stage: the body receives a flow_control
// instead of an input token and produces the stream of U values.
template<typename U, typename Body>
class concrete_filter<void,U,Body>: public filter {
    const Body& my_body;
    typedef token_helper<U, use_allocator<U>::value> u_helper;
    typedef typename u_helper::pointer u_pointer;
    void* operator()(void*) __TBB_override {
        flow_control control;
        u_pointer output_u = u_helper::create_token(my_body(control));
        if(control.is_pipeline_stopped) {
            // Body called control.stop(): discard the token it returned and
            // tell the pipeline no further input will arrive.
            u_helper::destroy_token(output_u);
            set_end_of_input();
            return NULL;
        }
        return u_helper::cast_to_void_ptr(output_u);
    }
public:
    //! filter_may_emit_null is forced on because returning NULL is how this
    //! stage signals end-of-input.
    concrete_filter(tbb::filter::mode filter_mode, const Body& body) :
        filter(static_cast<tbb::filter::mode>(filter_mode | filter_may_emit_null)),
        my_body(body)
    {}
};
// output
// Specialization for the sink stage: the body consumes T and produces
// nothing; this filter always returns NULL.
template<typename T, typename Body>
class concrete_filter<T,void,Body>: public filter {
    const Body& my_body;
    typedef token_helper<T, use_allocator<T>::value> t_helper;
    typedef typename t_helper::pointer t_pointer;
    void* operator()(void* input) __TBB_override {
        t_pointer temp_input = t_helper::cast_from_void_ptr(input);
        my_body(tbb::internal::move(t_helper::token(temp_input)));
        t_helper::destroy_token(temp_input);
        return NULL;
    }
    //! Dispose of an unprocessed token when the pipeline is cancelled.
    void finalize(void* input) __TBB_override {
        t_pointer temp_input = t_helper::cast_from_void_ptr(input);
        t_helper::destroy_token(temp_input);
    }
public:
    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
};
  417. template<typename Body>
  418. class concrete_filter<void,void,Body>: public filter {
  419. const Body& my_body;
  420. void* operator()(void*) __TBB_override {
  421. flow_control control;
  422. my_body(control);
  423. void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1;
  424. return output;
  425. }
  426. public:
  427. concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
  428. };
//! The class that represents an object of the pipeline for parallel_pipeline().
/** It primarily serves as RAII class that deletes heap-allocated filter instances. */
class pipeline_proxy {
    tbb::pipeline my_pipe;
public:
    //! Build the pipeline by walking the filter_t parse tree (defined out of line below).
    pipeline_proxy( const filter_t<void,void>& filter_chain );
    ~pipeline_proxy() {
        // Deleting the head repeatedly drains the chain: each filter's
        // destructor unlinks it from the pipeline.
        while( filter* f = my_pipe.filter_list )
            delete f; // filter destructor removes it from the pipeline
    }
    //! Expose the underlying pipeline (e.g. to call run()).
    tbb::pipeline* operator->() { return &my_pipe; }
};
//! Abstract base class that represents a node in a parse tree underlying a filter_t.
/** These nodes are always heap-allocated and can be shared by filter_t objects.
    Lifetime is managed by intrusive reference counting (add_ref/remove_ref). */
class filter_node: tbb::internal::no_copy {
    /** Count must be atomic because it is hidden state for user, but might be shared by threads. */
    tbb::atomic<intptr_t> ref_count;
protected:
    filter_node() {
        ref_count = 0;
#ifdef __TBB_TEST_FILTER_NODE_COUNT
        // Test hook: unit tests count live nodes to detect leaks.
        ++(__TBB_TEST_FILTER_NODE_COUNT);
#endif
    }
public:
    //! Add concrete_filter to pipeline
    virtual void add_to( pipeline& ) = 0;
    //! Increment reference count
    void add_ref() { ++ref_count; }
    //! Decrement reference count and delete if it becomes zero.
    void remove_ref() {
        __TBB_ASSERT(ref_count>0,"ref_count underflow");
        if( --ref_count==0 )
            delete this;
    }
    virtual ~filter_node() {
#ifdef __TBB_TEST_FILTER_NODE_COUNT
        --(__TBB_TEST_FILTER_NODE_COUNT);
#endif
    }
};
//! Node in parse tree representing result of make_filter.
/** Holds a copy of the user body; add_to instantiates the matching
    concrete_filter on the heap (deleted later by pipeline_proxy). */
template<typename T, typename U, typename Body>
class filter_node_leaf: public filter_node {
    const tbb::filter::mode mode;
    const Body body;
    void add_to( pipeline& p ) __TBB_override {
        concrete_filter<T,U,Body>* f = new concrete_filter<T,U,Body>(mode,body);
        p.add_filter( *f );
    }
public:
    filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {}
};
//! Node in parse tree representing join of two filters.
/** Created by operator& on two filter_t chains; holds a reference-counted
    share of both subtrees and adds them to the pipeline left-to-right. */
class filter_node_join: public filter_node {
    friend class filter_node; // to suppress GCC 3.2 warnings
    filter_node& left;
    filter_node& right;
    ~filter_node_join() {
        // Release the shares taken in the constructor.
        left.remove_ref();
        right.remove_ref();
    }
    void add_to( pipeline& p ) __TBB_override {
        left.add_to(p);
        right.add_to(p);
    }
public:
    filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) {
        left.add_ref();
        right.add_ref();
    }
};
  501. } // namespace internal
  502. //! @endcond
  503. //! Create a filter to participate in parallel_pipeline
  504. template<typename T, typename U, typename Body>
  505. filter_t<T,U> make_filter(tbb::filter::mode mode, const Body& body) {
  506. return new internal::filter_node_leaf<T,U,Body>(mode, body);
  507. }
  508. template<typename T, typename V, typename U>
  509. filter_t<T,U> operator& (const filter_t<T,V>& left, const filter_t<V,U>& right) {
  510. __TBB_ASSERT(left.root,"cannot use default-constructed filter_t as left argument of '&'");
  511. __TBB_ASSERT(right.root,"cannot use default-constructed filter_t as right argument of '&'");
  512. return new internal::filter_node_join(*left.root,*right.root);
  513. }
//! Class representing a chain of type-safe pipeline filters
/** A thin reference-counted handle to a parse tree of filter_node objects;
    copying a filter_t shares the tree rather than duplicating it. */
template<typename T, typename U>
class filter_t {
    typedef internal::filter_node filter_node;
    //! Root of the shared parse tree, or NULL for a default-constructed chain.
    filter_node* root;
    //! Take a reference-counted share of an existing node (used by friends only).
    filter_t( filter_node* root_ ) : root(root_) {
        root->add_ref();
    }
    friend class internal::pipeline_proxy;
    template<typename T_, typename U_, typename Body>
    friend filter_t<T_,U_> make_filter(tbb::filter::mode, const Body& );
    template<typename T_, typename V_, typename U_>
    friend filter_t<T_,U_> operator& (const filter_t<T_,V_>& , const filter_t<V_,U_>& );
public:
    // TODO: add move-constructors, move-assignment, etc. where C++11 is available.
    filter_t() : root(NULL) {}
    filter_t( const filter_t<T,U>& rhs ) : root(rhs.root) {
        if( root ) root->add_ref();
    }
    //! Build a single-stage chain directly from a body.
    template<typename Body>
    filter_t( tbb::filter::mode mode, const Body& body ) :
        root( new internal::filter_node_leaf<T,U,Body>(mode, body) ) {
        root->add_ref();
    }
    void operator=( const filter_t<T,U>& rhs ) {
        // Order of operations below carefully chosen so that reference counts remain correct
        // in unlikely event that remove_ref throws exception.
        filter_node* old = root;
        root = rhs.root;
        if( root ) root->add_ref();
        if( old ) old->remove_ref();
    }
    ~filter_t() {
        if( root ) root->remove_ref();
    }
    void clear() {
        // Like operator= with filter_t() on right side.
        if( root ) {
            filter_node* old = root;
            root = NULL;
            old->remove_ref();
        }
    }
};
//! Materialize the filter_t parse tree into concrete pipeline filters.
inline internal::pipeline_proxy::pipeline_proxy( const filter_t<void,void>& filter_chain ) : my_pipe() {
    __TBB_ASSERT( filter_chain.root, "cannot apply parallel_pipeline to default-constructed filter_t" );
    filter_chain.root->add_to(my_pipe);
}
//! Run a complete (void-to-void) filter chain to completion.
/** max_number_of_live_tokens bounds how many items are in flight at once.
    The pipeline_proxy owns the concrete filters and deletes them on exit
    (RAII), including when run() exits via exception. */
inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain
#if __TBB_TASK_GROUP_CONTEXT
    , tbb::task_group_context& context
#endif
    ) {
    internal::pipeline_proxy pipe(filter_chain);
    // tbb::pipeline::run() is called via the proxy
    pipe->run(max_number_of_live_tokens
#if __TBB_TASK_GROUP_CONTEXT
              , context
#endif
    );
}
#if __TBB_TASK_GROUP_CONTEXT
//! Convenience overload: runs the chain in a freshly created default context.
inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain) {
    tbb::task_group_context context;
    parallel_pipeline(max_number_of_live_tokens, filter_chain, context);
}
#endif // __TBB_TASK_GROUP_CONTEXT
  581. } // interface6
  582. using interface6::flow_control;
  583. using interface6::filter_t;
  584. using interface6::make_filter;
  585. using interface6::parallel_pipeline;
  586. } // tbb
  587. #include "internal/_warning_suppress_disable_notice.h"
  588. #undef __TBB_pipeline_H_include_area
  589. #endif /* __TBB_pipeline_H */