task.h 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189
  1. /*
  2. Copyright (c) 2005-2020 Intel Corporation
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. #include "internal/_deprecated_header_message_guard.h"
  14. #if !defined(__TBB_show_deprecation_message_task_H) && defined(__TBB_show_deprecated_header_message)
  15. #define __TBB_show_deprecation_message_task_H
  16. #pragma message("TBB Warning: tbb/task.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
  17. #endif
  18. #if defined(__TBB_show_deprecated_header_message)
  19. #undef __TBB_show_deprecated_header_message
  20. #endif
  21. #ifndef __TBB_task_H
  22. #define __TBB_task_H
  23. #define __TBB_task_H_include_area
  24. #include "internal/_warning_suppress_enable_notice.h"
  25. #include "tbb_stddef.h"
  26. #include "tbb_machine.h"
  27. #include "tbb_profiling.h"
  28. #include <climits>
  29. typedef struct ___itt_caller *__itt_caller;
  30. namespace tbb {
  31. class task;
  32. class task_list;
  33. class task_group_context;
  34. // MSVC does not allow taking the address of a member that was defined
  35. // privately in task_base and made public in class task via a using declaration.
  36. #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
  37. #define __TBB_TASK_BASE_ACCESS public
  38. #else
  39. #define __TBB_TASK_BASE_ACCESS private
  40. #endif
  41. namespace internal { //< @cond INTERNAL
//! Proxy returned by task::allocate_additional_child_of(): carries the prospective
//! parent task from the call site to the overloaded operator new that consumes it.
class allocate_additional_child_of_proxy: no_assign {
    //! No longer used, but retained for binary layout compatibility. Always NULL.
    task* self;
    //! The task that will become the new child's parent.
    task& parent;
public:
    explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {
        // 'self' exists only to preserve the historical object layout; silence the warning.
        suppress_unused_warning( self );
    }
    //! Allocate 'size' bytes for a task whose parent() will be 'parent'. Defined in the TBB binary.
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    //! Return the task's memory to the allocator. Defined in the TBB binary.
    void __TBB_EXPORTED_METHOD free( task& ) const;
};
//! Opaque, int-aligned storage sized to hold 64 bits of platform FPU control state
//! (used by task_group_context when __TBB_FP_CONTEXT is enabled).
struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
  54. } //< namespace internal @endcond
  55. namespace interface5 {
  56. namespace internal {
  57. //! Base class for methods that became static in TBB 3.0.
  58. /** TBB's evolution caused the "this" argument for several methods to become obsolete.
  59. However, for backwards binary compatibility, the new methods need distinct names,
  60. otherwise the One Definition Rule would be broken. Hence the new methods are
  61. defined in this private base class, and then exposed in class task via
  62. using declarations. */
//! Base class for methods that became static in TBB 3.0.
/** TBB's evolution caused the "this" argument for several methods to become obsolete.
    However, for backwards binary compatibility, the new methods need distinct names,
    otherwise the One Definition Rule would be broken. Hence the new methods are
    defined in this private base class, and then exposed in class task via
    using declarations. */
class task_base: tbb::internal::no_copy {
__TBB_TASK_BASE_ACCESS: // public on MSVC/old gcc, private otherwise (see macro above)
    friend class tbb::task;

    //! Schedule task for execution when a worker becomes available.
    static void spawn( task& t );

    //! Spawn multiple tasks and clear list.
    static void spawn( task_list& list );

    //! Like allocate_child, except that task's parent becomes "t", not this.
    /** Typically used in conjunction with schedule_to_reexecute to implement while loops.
        Atomically increments the reference count of t.parent() */
    static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
        return tbb::internal::allocate_additional_child_of_proxy(t);
    }

    //! Destroy a task.
    /** Usually, calling this method is unnecessary, because a task is
        implicitly deleted after its execute() method runs. However,
        sometimes a task needs to be explicitly deallocated, such as
        when a root task is used as the parent in spawn_and_wait_for_all. */
    static void __TBB_EXPORTED_FUNC destroy( task& victim );
};
  83. } // internal
  84. } // interface5
  85. //! @cond INTERNAL
  86. namespace internal {
//! Abstract interface to the scheduler, as seen from the task layer.
/** All virtual methods are implemented inside the TBB binary.
    NOTE(review): like other entities in this header, the declaration order appears
    to be binary-compatibility sensitive — confirm before reordering. */
class scheduler: no_copy {
public:
    //! For internal use only
    virtual void spawn( task& first, task*& next ) = 0;
    //! For internal use only
    virtual void wait_for_all( task& parent, task* child ) = 0;
    //! For internal use only
    virtual void spawn_root_and_wait( task& first, task*& next ) = 0;
    //! Pure virtual destructor;
    //  Have to have it just to shut up overzealous compilation warnings
    virtual ~scheduler() = 0;
    //! For internal use only
    virtual void enqueue( task& t, void* reserved ) = 0;
};
//! A reference count
/** Should always be non-negative. A signed type is used so that underflow can be detected. */
typedef intptr_t reference_count;

#if __TBB_PREVIEW_RESUMABLE_TASKS
//! The flag to indicate that the wait task has been abandoned.
/** Occupies the second-highest bit so it can coexist with the (non-negative) count proper. */
static const reference_count abandon_flag = reference_count(1) << (sizeof(reference_count)*CHAR_BIT - 2);
#endif

//! An id as used for specifying affinity.
typedef unsigned short affinity_id;

#if __TBB_TASK_ISOLATION
//! A tag for task isolation.
typedef intptr_t isolation_tag;
//! Default tag: the task does not participate in any isolation region.
const isolation_tag no_isolation = 0;
#endif /* __TBB_TASK_ISOLATION */
#if __TBB_TASK_GROUP_CONTEXT
class generic_scheduler;

//! Doubly-linked-list node embedded into task_group_context to form the
//! thread-specific context list without extra memory allocation.
struct context_list_node_t {
    context_list_node_t *my_prev,
                        *my_next;
};

//! Proxy returned by task::allocate_root(ctx): carries the context the new root task
//! will be associated with to the overloaded operator new that consumes it.
class allocate_root_with_context_proxy: no_assign {
    task_group_context& my_context;
public:
    allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
    //! Allocate 'size' bytes for a root task bound to my_context. Defined in the TBB binary.
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    //! Return the task's memory to the allocator. Defined in the TBB binary.
    void __TBB_EXPORTED_METHOD free( task& ) const;
};
#endif /* __TBB_TASK_GROUP_CONTEXT */
//! Proxy returned by task::allocate_root(); targets the overloaded operator new.
class allocate_root_proxy: no_assign {
public:
    static task& __TBB_EXPORTED_FUNC allocate( size_t size );
    static void __TBB_EXPORTED_FUNC free( task& );
};

//! Proxy returned by task::allocate_continuation().
/** Stateless: task::allocate_continuation() reinterprets the task's own address
    as this proxy, so allocate()/free() implicitly know the invoking task. */
class allocate_continuation_proxy: no_assign {
public:
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};

//! Proxy returned by task::allocate_child(). Stateless, like allocate_continuation_proxy.
class allocate_child_proxy: no_assign {
public:
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};
#if __TBB_PREVIEW_CRITICAL_TASKS
// TODO: move to class methods when critical task API becomes public
// Declared at namespace scope so they can be friends of task_prefix (see below).
void make_critical( task& t );
bool is_critical( task& t );
#endif
//! Memory prefix to a task object.
/** This class is internal to the library.
    Do not reference it directly, except within the library itself.
    Fields are ordered in way that preserves backwards compatibility and yields good packing on
    typical 32-bit and 64-bit platforms. New fields should be added at the beginning for
    backward compatibility with accesses to the task prefix inlined into application code. To
    prevent ODR violation, the class shall have the same layout in all application translation
    units. If some fields are conditional (e.g. enabled by preview macros) and might get
    skipped, use reserved fields to adjust the layout.
    In case task prefix size exceeds 32 or 64 bytes on IA32 and Intel64 architectures
    correspondingly, consider dynamic setting of task_alignment and task_prefix_reservation_size
    based on the maximal operand size supported by the current CPU.
    @ingroup task_scheduling */
class task_prefix {
private:
    friend class tbb::task;
    friend class tbb::interface5::internal::task_base;
    friend class tbb::task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_additional_child_of_proxy;
#if __TBB_PREVIEW_CRITICAL_TASKS
    friend void make_critical( task& );
    friend bool is_critical( task& );
#endif

#if __TBB_TASK_ISOLATION
    //! The tag used for task isolation.
    isolation_tag isolation;
#else
    // Keeps the layout identical whether or not task isolation is compiled in.
    intptr_t reserved_space_for_task_isolation_tag;
#endif /* __TBB_TASK_ISOLATION */

#if __TBB_TASK_GROUP_CONTEXT
    //! Shared context that is used to communicate asynchronous state changes
    /** Currently it is used to broadcast cancellation requests generated both
        by users and as the result of unhandled exceptions in the task::execute()
        methods. */
    task_group_context *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! The scheduler that allocated the task, or NULL if the task is big.
    /** Small tasks are pooled by the scheduler that allocated the task.
        If a scheduler needs to free a small task allocated by another scheduler,
        it returns the task to that other scheduler. This policy avoids
        memory space blowup issues for memory allocators that allocate from
        thread-specific pools. */
    scheduler* origin;

#if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS
    // The three pointers below are mutually exclusive in use, so they share storage.
    union {
#endif /* __TBB_TASK_PRIORITY */

    //! Obsolete. The scheduler that owns the task.
    /** Retained only for the sake of backward binary compatibility.
        Still used by inline methods in the task.h header. **/
    scheduler* owner;

#if __TBB_TASK_PRIORITY
    //! Pointer to the next offloaded lower priority task.
    /** Used to maintain a list of offloaded tasks inside the scheduler. **/
    task* next_offloaded;
#endif

#if __TBB_PREVIEW_RESUMABLE_TASKS
    //! Pointer to the abandoned scheduler where the current task is waited for.
    scheduler* abandoned_scheduler;
#endif

#if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS
    };
#endif /* __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS */

    //! The task whose reference count includes me.
    /** In the "blocking style" of programming, this field points to the parent task.
        In the "continuation-passing style" of programming, this field points to the
        continuation of the parent. */
    tbb::task* parent;

    //! Reference count used for synchronization.
    /** In the "continuation-passing style" of programming, this field is
        the difference of the number of allocated children minus the
        number of children that have completed.
        In the "blocking style" of programming, this field is one more than the difference. */
    __TBB_atomic reference_count ref_count;

    //! Obsolete. Used to be scheduling depth before TBB 2.2
    /** Retained only for the sake of backward binary compatibility.
        Not used by TBB anymore. **/
    int depth;

    //! A task::state_type, stored as a byte for compactness.
    /** This state is exposed to users via method task::state(). */
    unsigned char state;

    //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
    /** 0x0 -> version 1.0 task
        0x1 -> version >=2.1 task
        0x10 -> task was enqueued
        0x20 -> task_proxy
        0x40 -> task has live ref_count
        0x80 -> a stolen task */
    unsigned char extra_state;

    affinity_id affinity;

    //! "next" field for list of task
    tbb::task* next;

    //! The task corresponding to this task_prefix.
    // The prefix is allocated immediately before the task object, so the task
    // lives at this+1.
    tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
};
  247. } // namespace internal
  248. //! @endcond
  249. #if __TBB_TASK_GROUP_CONTEXT
#if __TBB_TASK_PRIORITY
namespace internal {
    // The priority scale partitions the positive int range into quarter-range strides.
    static const int priority_stride_v4 = INT_MAX / 4;
#if __TBB_PREVIEW_CRITICAL_TASKS
    // TODO: move into priority_t enum when critical tasks become public feature
    // Numerically above priority_high (3 strides), so critical outranks any user priority.
    static const int priority_critical = priority_stride_v4 * 3 + priority_stride_v4 / 3 * 2;
#endif
}

//! User-visible priority levels, one stride apart, centered on priority_normal.
enum priority_t {
    priority_normal = internal::priority_stride_v4 * 2,
    priority_low = priority_normal - internal::priority_stride_v4,
    priority_high = priority_normal + internal::priority_stride_v4
};
#endif /* __TBB_TASK_PRIORITY */
// Forward declaration of the exception container type selected below by
// task_group_context, plus classes granted friendship by it.
#if TBB_USE_CAPTURED_EXCEPTION
class tbb_exception;
#else
namespace internal {
class tbb_exception_ptr;
}
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

class task_scheduler_init;
namespace interface7 { class task_arena; }
using interface7::task_arena;
//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code, and unhandled
    exceptions intercepted during tasks execution. Intercepting an exception results
    in generating internal cancellation requests (which is processed in exactly the
    same way as external ones).
    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.
    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. Arrows here designate
    cancellation propagation direction. If a task in a cancellation group is cancelled
    all the other tasks in this group and groups bound to it (as children) get cancelled too.
    IMPLEMENTATION NOTE:
    When adding new members to task_group_context or changing types of existing ones,
    update the size of both padding buffers (_leading_padding and _trailing_padding)
    appropriately. See also VERSIONING NOTE at the constructor definition below. **/
class task_group_context : internal::no_copy {
private:
    friend class internal::generic_scheduler;
    friend class task_scheduler_init;
    friend class task_arena;

#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    //! How my_version_and_traits is partitioned: version in the low 16 bits, traits above.
    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    //! Behavioral traits; stored in the high half of my_version_and_traits.
    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
#if __TBB_FP_CONTEXT
        fp_settings = 0x0002ul << traits_offset,
#endif
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    //! Flag bits for my_state.
    enum state {
        may_have_children = 1,
        // the following enumerations must be the last, new 2^x values must go above
        next_state_value, low_unused_state_bit = (next_state_value-1)*2
    };

    union {
        //! Flavor of this context: bound or isolated.
        // TODO: describe asynchronous use, and whether any memory semantics are needed
        __TBB_atomic kind_type my_kind;
        uintptr_t _my_kind_aligner; // forces pointer-size alignment/footprint for my_kind
    };

    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
    task_group_context *my_parent;

    //! Used to form the thread specific list of contexts without additional memory allocation.
    /** A context is included into the list of the current thread when its binding to
        its parent happens. Any context can be present in the list of one thread only. **/
    internal::context_list_node_t my_node;

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    __itt_caller itt_caller;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    /** Read accesses to the field my_cancellation_requested are on the hot path inside
        the scheduler. This padding ensures that this field never shares the same cache
        line with a local variable that is frequently written to. **/
    char _leading_padding[internal::NFS_MaxLineSize
        - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
        - sizeof(__itt_caller)
#if __TBB_FP_CONTEXT
        - sizeof(internal::cpu_ctl_env_space)
#endif
    ];

#if __TBB_FP_CONTEXT
    //! Space for platform-specific FPU settings.
    /** Must only be accessed inside TBB binaries, and never directly in user
        code or inline methods. */
    internal::cpu_ctl_env_space my_cpu_ctl_env;
#endif

    //! Specifies whether cancellation was requested for this task group.
    uintptr_t my_cancellation_requested;

    //! Version for run-time checks and behavioral traits of the context.
    /** Version occupies low 16 bits, and traits (zero or more ORed enumerators
        from the traits_type enumerations) take the next 16 bits.
        Original (zeroth) version of the context did not support any traits. **/
    uintptr_t my_version_and_traits;

    //! Pointer to the container storing exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler instance that registered this context in its thread specific list.
    internal::generic_scheduler *my_owner;

    //! Internal state (combination of state flags, currently only may_have_children).
    uintptr_t my_state;

#if __TBB_TASK_PRIORITY
    //! Priority level of the task group (in normalized representation)
    intptr_t my_priority;
#endif /* __TBB_TASK_PRIORITY */

    //! Description of algorithm for scheduler based instrumentation.
    internal::string_index my_name;

    //! Trailing padding protecting accesses to frequently used members from false sharing
    /** \sa _leading_padding **/
    char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
#if __TBB_TASK_PRIORITY
        - sizeof(intptr_t)
#endif /* __TBB_TASK_PRIORITY */
        - sizeof(internal::string_index)
    ];

public:
    //! Default & binding constructor.
    /** By default a bound context is created. That is this context will be bound
        (as child) to the context of the task calling task::allocate_root(this_context)
        method. Cancellation requests passed to the parent context are propagated
        to all the contexts bound to it. Similarly priority change is propagated
        from the parent context to its children.
        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.
        Creating isolated contexts involve much less overhead, but they have limited
        utility. Normally when an exception occurs in an algorithm that has nested
        ones running, it is desirably to have all the nested algorithms cancelled
        as well. Such a behavior requires nested algorithms to use bound contexts.
        There is one good place where using isolated algorithms is beneficial. It is
        a master thread. That is if a particular algorithm is invoked directly from
        the master thread (not from a TBB task), supplying it with explicitly
        created isolated context will result in a faster algorithm startup.
        VERSIONING NOTE:
        Implementation(s) of task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility, if
        we ever have to change the size of the context object.
        Boosting the runtime version will also be necessary if new data fields are
        introduced in the currently unused padding areas and these fields are updated
        by inline methods. **/
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t t = default_traits )
        // Run-time version is 3 (low 16 bits); 't' supplies the trait bits above them.
        : my_kind(relation_with_parent)
        , my_version_and_traits(3 | t)
        , my_name(internal::CUSTOM_CTX)
    {
        init();
    }

    // Custom constructor for instrumentation of TBB algorithm
    task_group_context ( internal::string_index name )
        : my_kind(bound)
        , my_version_and_traits(3 | default_traits)
        , my_name(name)
    {
        init();
    }

    // Do not introduce standalone unbind method since it will break state propagation assumptions
    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.
        IMPORTANT: This method is not thread safe!
        The method does not change the context's parent if it is set. **/
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.
        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not yet cancelled
        context, true will be returned by one and only one invocation. **/
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if the context received cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the pending exception, and cancels the task group.
    /** May be called only from inside a catch-block. If the context is already
        cancelled, does nothing.
        The method brings the task group associated with this context exactly into
        the state it would be in, if one of its tasks threw the currently pending
        exception during its execution. In other words, it emulates the actions
        of the scheduler's dispatch loop exception handler. **/
    void __TBB_EXPORTED_METHOD register_pending_exception ();

#if __TBB_FP_CONTEXT
    //! Captures the current FPU control settings to the context.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.
        IMPORTANT: This method is not thread safe!
        The method does not change the FPU control settings of the context's parent. **/
    void __TBB_EXPORTED_METHOD capture_fp_settings ();
#endif

#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group
    __TBB_DEPRECATED_IN_VERBOSE_MODE void set_priority ( priority_t );
    //! Retrieves current priority of the current task group
    __TBB_DEPRECATED_IN_VERBOSE_MODE priority_t priority () const;
#endif /* __TBB_TASK_PRIORITY */

    //! Returns the context's trait
    uintptr_t traits() const { return my_version_and_traits & traits_mask; }

protected:
    //! Out-of-line part of the constructor.
    /** Singled out to ensure backward binary compatibility of the future versions. **/
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    // Lifecycle stages of the context's binding, expressed as successive kind_type values.
    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    //! Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line.
    template <typename T>
    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );

    //! Registers this context with the local scheduler and binds it to its parent context
    void bind_to ( internal::generic_scheduler *local_sched );

    //! Registers this context with the local scheduler
    void register_with ( internal::generic_scheduler *local_sched );

#if __TBB_FP_CONTEXT
    //! Copies FPU control setting from another context
    // TODO: Consider adding #else stub in order to omit #if sections in other code
    void copy_fp_settings( const task_group_context &src );
#endif /* __TBB_FP_CONTEXT */
}; // class task_group_context
  497. #endif /* __TBB_TASK_GROUP_CONTEXT */
  498. //! Base class for user-defined tasks.
  499. /** @ingroup task_scheduling */
  500. class __TBB_DEPRECATED_IN_VERBOSE_MODE task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {
    //! Set reference count
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    // extra_state==1 marks this as a "version >=2.1" task (see task_prefix::extra_state).
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes.
    virtual task* execute() = 0;
    //! Enumeration of task states that the scheduler considers.
    // Values are held in task_prefix::state as an unsigned char.
    enum state_type {
        //! task is running, and will be destroyed after method execute() completes.
        executing,
        //! task to be rescheduled.
        reexecute,
        //! task is in ready pool, or is going to be put there, or was just taken off.
        ready,
        //! task object is freshly allocated or recycled.
        allocated,
        //! task object is on free list, or is going to be put there, or was just taken off.
        freed,
        //! task to be recycled as continuation
        recycle
#if __TBB_RECYCLE_TO_ENQUEUE
        //! task to be scheduled for starvation-resistant execution
        ,to_enqueue
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
        //! a special task used to resume a scheduler.
        ,to_resume
#endif
    };
    //------------------------------------------------------------------------
    // Allocating tasks
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! Returns proxy for overloaded new that allocates a root task associated with user supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The continuation's parent becomes the parent of *this. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        // The proxy has no data members; *this is reinterpreted so the proxy's
        // out-of-line allocate()/free() receive this task's address.
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Define recommended static form via import from base class.
    using task_base::allocate_additional_child_of;
  560. #if __TBB_DEPRECATED_TASK_INTERFACE
  561. //! Destroy a task.
  562. /** Usually, calling this method is unnecessary, because a task is
  563. implicitly deleted after its execute() method runs. However,
  564. sometimes a task needs to be explicitly deallocated, such as
  565. when a root task is used as the parent in spawn_and_wait_for_all. */
  566. void __TBB_EXPORTED_METHOD destroy( task& t );
  567. #else /* !__TBB_DEPRECATED_TASK_INTERFACE */
  568. //! Define recommended static form via import from base class.
  569. using task_base::destroy;
  570. #endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** The caller must guarantee that the task's refcount does not become zero until
        after the method execute() returns. Typically, this is done by having
        method execute() return a pointer to a child of the task. If the guarantee
        cannot be made, use method recycle_as_safe_continuation instead.
        Because of the hazard, this method may be deprecated in the future. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        // Resetting to "allocated" stops the scheduler from freeing the task
        // when execute() returns.
        prefix().state = allocated;
    }
    //! Recommended to use, safe variant of recycle_as_continuation
    /** For safety, it requires additional increment of ref_count.
        With no descendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        // The "recycle" state defers reuse of the task until its reference
        // count permits it.
        prefix().state = recycle;
    }
  591. //! Change this to be a child of new_parent.
  592. void recycle_as_child_of( task& new_parent ) {
  593. internal::task_prefix& p = prefix();
  594. __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
  595. __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
  596. __TBB_ASSERT( p.parent==NULL, "parent must be null" );
  597. __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
  598. __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
  599. p.state = allocated;
  600. p.parent = &new_parent;
  601. #if __TBB_TASK_GROUP_CONTEXT
  602. p.context = new_parent.prefix().context;
  603. #endif /* __TBB_TASK_GROUP_CONTEXT */
  604. }
    //! Schedule this for reexecution after current execute() returns.
    /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        // The scheduler will respawn the task after execute() completes.
        prefix().state = reexecute;
    }
#if __TBB_RECYCLE_TO_ENQUEUE
    //! Schedule this to enqueue after descendant tasks complete.
    /** Save enqueue/spawn difference, it has the semantics of recycle_as_safe_continuation. */
    void recycle_to_enqueue() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        prefix().state = to_enqueue;
    }
#endif /* __TBB_RECYCLE_TO_ENQUEUE */
    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        // Out-of-line version adds validation and threading-tool notification.
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }
    //! Atomically increment reference count.
    /** Has acquire semantics */
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }
    //! Atomically adds to reference count and returns its new value.
    /** Has release-acquire semantics */
    int add_ref_count( int count ) {
        internal::call_itt_notify( internal::releasing, &prefix().ref_count );
        // Fetch-and-add returns the OLD value; add count to get the new one.
        internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
        __TBB_ASSERT( k>=0, "task's reference count underflowed" );
        if( k==0 )
            internal::call_itt_notify( internal::acquired, &prefix().ref_count );
        return int(k);
    }
    //! Atomically decrement reference count and returns its new value.
    /** Has release semantics. */
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        // Fetch-and-decrement returns the OLD value; subtract 1 for the new one.
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }
    //! Define recommended static forms via import from base class.
    using task_base::spawn;
    //! Similar to spawn followed by wait_for_all, but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        // Delegates to the owning scheduler; *this is the waiting parent.
        prefix().owner->wait_for_all( *this, &child );
    }
    //! Similar to spawn followed by wait_for_all, but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
    //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }
    //! Spawn root tasks on list and wait for all of them to finish.
    /** If there are more tasks than worker threads, the tasks are spawned in
        order of front to back. */
    static void spawn_root_and_wait( task_list& root_list );
    //! Wait for reference count to become one, and set reference count to zero.
    /** Works on tasks while waiting. */
    void wait_for_all() {
        // NULL child: wait only; nothing is spawned first.
        prefix().owner->wait_for_all( *this, NULL );
    }
    //! Enqueue task for starvation-resistant execution.
#if __TBB_TASK_PRIORITY
    /** The task will be enqueued on the normal priority level disregarding the
        priority of its task group.
        The rationale of such semantics is that priority of an enqueued task is
        statically fixed at the moment of its enqueuing, while task group priority
        is dynamic. Thus automatic priority inheritance would be generally a subject
        to the race, which may result in unexpected behavior.
        Use enqueue() overload with explicit priority value and task::group_priority()
        method to implement such priority inheritance when it is really necessary. **/
#endif /* __TBB_TASK_PRIORITY */
    static void enqueue( task& t ) {
        // NULL cookie means "no explicit priority".
        t.prefix().owner->enqueue( t, NULL );
    }
#if __TBB_TASK_PRIORITY
    //! Enqueue task for starvation-resistant execution on the specified priority level.
    static void enqueue( task& t, priority_t p ) {
#if __TBB_PREVIEW_CRITICAL_TASKS
        __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high
                     || p == internal::priority_critical, "Invalid priority level value");
#else
        __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
#endif
        // The priority value is passed through the scheduler's void* cookie.
        t.prefix().owner->enqueue( t, (void*)p );
    }
#endif /* __TBB_TASK_PRIORITY */
    //! Enqueue task in task_arena
    //! The implementation is in task_arena.h
#if __TBB_TASK_PRIORITY
    inline static void enqueue( task& t, task_arena& arena, priority_t p = priority_t(0) );
#else
    inline static void enqueue( task& t, task_arena& arena);
#endif
    //! The innermost task being executed or destroyed by the current thread at the moment.
    static task& __TBB_EXPORTED_FUNC self();
    //! task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}
    //! sets parent task pointer to specified value
    void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
        __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif
        prefix().parent = p;
    }
#if __TBB_TASK_GROUP_CONTEXT
    //! This method is deprecated and will be removed in the future.
    /** Use method group() instead. **/
    task_group_context* context() {return prefix().context;}
    //! Pointer to the task group descriptor.
    task_group_context* group () { return prefix().context; }
#endif /* __TBB_TASK_GROUP_CONTEXT */
    //! True if task was stolen from the task pool of another thread.
    bool is_stolen_task() const {
        // Bit 0x80 of extra_state marks a stolen task — presumably the internal
        // es_task_is_stolen flag; confirm against the extra_state definitions.
        return (prefix().extra_state & 0x80)!=0;
    }
    //! True if the task was enqueued
    bool is_enqueued_task() const {
        // es_task_enqueued = 0x10
        return (prefix().extra_state & 0x10)!=0;
    }
#if __TBB_PREVIEW_RESUMABLE_TASKS
    //! Type that defines suspension point
    typedef void* suspend_point;
    //! Suspend current task execution
    template <typename F>
    static void suspend(F f);
    //! Resume specific suspend point
    static void resume(suspend_point tag);
#endif
    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state
    state_type state() const {return state_type(prefix().state);}
    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
#if __TBB_PREVIEW_RESUMABLE_TASKS
        // Mask off the abandon flag so only the numeric count is range-checked.
        internal::reference_count ref_count_ = prefix().ref_count & ~internal::abandon_flag;
#else
        internal::reference_count ref_count_ = prefix().ref_count;
#endif
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
        return int(prefix().ref_count & ~internal::abandon_flag);
#else
        return int(prefix().ref_count);
#endif
    }
    //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;
    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** Guaranteed to be integral type. Value of 0 means no affinity. */
    typedef internal::affinity_id affinity_id;
    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}
    //! Current affinity of this task
    affinity_id affinity() const {return prefix().affinity;}
    //! Invoked by scheduler to notify task that it ran on unexpected thread.
    /** Invoked before method execute() runs, if task is stolen, or task has
        affinity but will be executed on another thread.
        The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
#if __TBB_TASK_GROUP_CONTEXT
    //! Moves this task from its current group into another one.
    /** Argument ctx specifies the new group.
        The primary purpose of this method is to associate unique task group context
        with a task allocated for subsequent enqueuing. In contrast to spawned tasks
        enqueued ones normally outlive the scope where they were created. This makes
        traditional usage model where task group context are allocated locally on
        the stack inapplicable. Dynamic allocation of context objects is performance
        inefficient. Method change_group() allows to make task group context object
        a member of the task class, and then associate it with its containing task
        object in the latter's constructor. **/
    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );
    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise. **/
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }
    //! Returns true if the context has received cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#else
    //! Stub: without task group contexts a task can never be cancelled.
    bool is_cancelled () const { return false; }
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group this task belongs to.
    __TBB_DEPRECATED void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }
    //! Retrieves current priority of the task group this task belongs to.
    __TBB_DEPRECATED priority_t group_priority () const { return prefix().context->priority(); }
#endif /* __TBB_TASK_PRIORITY */
private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    //! Get reference to corresponding task_prefix.
    /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        // The task_prefix lives in memory immediately BEFORE the task object,
        // hence the [-1] index on the reinterpreted pointer.
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
#if __TBB_PREVIEW_CRITICAL_TASKS
    friend void internal::make_critical( task& );
    friend bool internal::is_critical( task& );
#endif
}; // class task
#if __TBB_PREVIEW_CRITICAL_TASKS
namespace internal {
// Bit 0x8 of the task's extra_state encodes the "critical" flag.
inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; }
inline bool is_critical( task& t ) { return bool((t.prefix().extra_state & 0x8) != 0); }
} // namespace internal
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
#if __TBB_PREVIEW_RESUMABLE_TASKS
namespace internal {
    //! Type-erased trampoline that forwards the suspend_point to the user functor.
    template <typename F>
    static void suspend_callback(void* user_callback, task::suspend_point tag) {
        // Copy user function to a new stack to avoid a race when the previous scheduler is resumed.
        F user_callback_copy = *static_cast<F*>(user_callback);
        user_callback_copy(tag);
    }
    void __TBB_EXPORTED_FUNC internal_suspend(void* suspend_callback, void* user_callback);
    void __TBB_EXPORTED_FUNC internal_resume(task::suspend_point);
    task::suspend_point __TBB_EXPORTED_FUNC internal_current_suspend_point();
}
//! Suspend the current task, invoking functor f with the suspension point.
template <typename F>
inline void task::suspend(F f) {
    // Pass the instantiated trampoline plus a pointer to the (stack-resident) functor.
    internal::internal_suspend((void*)internal::suspend_callback<F>, &f);
}
//! Resume execution at the given suspension point.
inline void task::resume(suspend_point tag) {
    internal::internal_resume(tag);
}
#endif
//! task that does nothing. Useful for synchronization.
/** @ingroup task_scheduling */
class __TBB_DEPRECATED_IN_VERBOSE_MODE empty_task: public task {
    //! No work of its own; only the scheduler's bookkeeping side effects occur.
    task* execute() __TBB_override {
        return NULL;
    }
};
//! @cond INTERNAL
namespace internal {
    //! Task that invokes a stored functor exactly once and returns no continuation.
    template<typename F>
    class function_task : public task {
#if __TBB_ALLOW_MUTABLE_FUNCTORS
        // TODO: deprecated behavior, remove
        F my_func;
#else
        const F my_func;
#endif
        task* execute() __TBB_override {
            my_func();
            return NULL;
        }
    public:
        //! Construct from a functor (copied into the task).
        function_task( const F& f ) : my_func(f) {}
#if __TBB_CPP11_RVALUE_REF_PRESENT
        //! Construct by moving the functor into the task.
        function_task( F&& f ) : my_func( std::move(f) ) {}
#endif
    };
} // namespace internal
//! @endcond
  885. //! A list of children.
  886. /** Used for method task::spawn_children
  887. @ingroup task_scheduling */
  888. class __TBB_DEPRECATED_IN_VERBOSE_MODE task_list: internal::no_copy {
  889. private:
  890. task* first;
  891. task** next_ptr;
  892. friend class task;
  893. friend class interface5::internal::task_base;
  894. public:
  895. //! Construct empty list
  896. task_list() : first(NULL), next_ptr(&first) {}
  897. //! Destroys the list, but does not destroy the task objects.
  898. ~task_list() {}
  899. //! True if list is empty; false otherwise.
  900. bool empty() const {return !first;}
  901. //! Push task onto back of list.
  902. void push_back( task& task ) {
  903. task.prefix().next = NULL;
  904. *next_ptr = &task;
  905. next_ptr = &task.prefix().next;
  906. }
  907. #if __TBB_TODO
  908. // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
  909. //! Push task onto front of list (FIFO local execution, like individual spawning in the same order).
  910. void push_front( task& task ) {
  911. if( empty() ) {
  912. push_back(task);
  913. } else {
  914. task.prefix().next = first;
  915. first = &task;
  916. }
  917. }
  918. #endif
  919. //! Pop the front task from the list.
  920. task& pop_front() {
  921. __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
  922. task* result = first;
  923. first = result->prefix().next;
  924. if( !first ) next_ptr = &first;
  925. return *result;
  926. }
  927. //! Clear the list
  928. void clear() {
  929. first=NULL;
  930. next_ptr=&first;
  931. }
  932. };
//! Schedule a single task for execution by the scheduler that owns it.
inline void interface5::internal::task_base::spawn( task& t ) {
    t.prefix().owner->spawn( t, t.prefix().next );
}
  936. inline void interface5::internal::task_base::spawn( task_list& list ) {
  937. if( task* t = list.first ) {
  938. t->prefix().owner->spawn( *t, *list.next_ptr );
  939. list.clear();
  940. }
  941. }
  942. inline void task::spawn_root_and_wait( task_list& root_list ) {
  943. if( task* t = root_list.first ) {
  944. t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
  945. root_list.clear();
  946. }
  947. }
  948. } // namespace tbb
// Placement new/delete pairs for the task-allocation proxies.
// The matching operator delete is invoked only if the task constructor throws
// during the placement-new expression.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}
inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}
#if __TBB_TASK_GROUP_CONTEXT
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}
inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}
inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}
inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}
inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
  981. #include "internal/_warning_suppress_disable_notice.h"
  982. #undef __TBB_task_H_include_area
  983. #endif /* __TBB_task_H */