_concurrent_unordered_impl.h 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684
  1. /*
  2. Copyright (c) 2005-2020 Intel Corporation
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. /* Container implementations in this header are based on PPL implementations
  14. provided by Microsoft. */
  15. #ifndef __TBB__concurrent_unordered_impl_H
  16. #define __TBB__concurrent_unordered_impl_H
  17. #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
  18. #error Do not #include this internal file directly; use public TBB headers instead.
  19. #endif
  20. #include "../tbb_stddef.h"
  21. #include <iterator>
  22. #include <utility> // Need std::pair
  23. #include <functional> // Need std::equal_to (in ../concurrent_unordered_*.h)
  24. #include <string> // For tbb_hasher
  25. #include <cstring> // Need std::memset
  26. #include __TBB_STD_SWAP_HEADER
  27. #include "../atomic.h"
  28. #include "../tbb_exception.h"
  29. #include "../tbb_allocator.h"
  30. #if __TBB_INITIALIZER_LISTS_PRESENT
  31. #include <initializer_list>
  32. #endif
  33. #if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_COPY_DELETION_BROKEN
  34. #define __TBB_UNORDERED_NODE_HANDLE_PRESENT 1
  35. #endif
  36. #include "_allocator_traits.h"
  37. #include "_tbb_hash_compare_impl.h"
  38. #include "_template_helpers.h"
  39. #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
  40. #include "_node_handle_impl.h"
  41. #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
  42. namespace tbb {
  43. namespace interface5 {
  44. //! @cond INTERNAL
  45. namespace internal {
  46. template <typename T, typename Allocator>
  47. class split_ordered_list;
  48. template <typename Traits>
  49. class concurrent_unordered_base;
  50. // Forward list iterators (without skipping dummy elements)
  51. template<class Solist, typename Value>
  52. class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
  53. {
  54. template <typename T, typename Allocator>
  55. friend class split_ordered_list;
  56. template <typename Traits>
  57. friend class concurrent_unordered_base;
  58. template<class M, typename V>
  59. friend class flist_iterator;
  60. typedef typename Solist::nodeptr_t nodeptr_t;
  61. public:
  62. typedef typename Solist::value_type value_type;
  63. typedef typename Solist::difference_type difference_type;
  64. typedef typename Solist::pointer pointer;
  65. typedef typename Solist::reference reference;
  66. flist_iterator() : my_node_ptr(0) {}
  67. flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
  68. : my_node_ptr(other.my_node_ptr) {}
  69. flist_iterator& operator=( const flist_iterator<Solist, typename Solist::value_type> &other ) {
  70. my_node_ptr = other.my_node_ptr;
  71. return *this;
  72. }
  73. reference operator*() const { return my_node_ptr->my_element; }
  74. pointer operator->() const { return &**this; }
  75. flist_iterator& operator++() {
  76. my_node_ptr = my_node_ptr->my_next;
  77. return *this;
  78. }
  79. flist_iterator operator++(int) {
  80. flist_iterator tmp = *this;
  81. ++*this;
  82. return tmp;
  83. }
  84. protected:
  85. flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
  86. nodeptr_t get_node_ptr() const { return my_node_ptr; }
  87. nodeptr_t my_node_ptr;
  88. template<typename M, typename T, typename U>
  89. friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
  90. template<typename M, typename T, typename U>
  91. friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
  92. };
  93. template<typename Solist, typename T, typename U>
  94. bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
  95. return i.my_node_ptr == j.my_node_ptr;
  96. }
  97. template<typename Solist, typename T, typename U>
  98. bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
  99. return i.my_node_ptr != j.my_node_ptr;
  100. }
  101. // Split-order list iterators, needed to skip dummy elements
  102. template<class Solist, typename Value>
  103. class solist_iterator : public flist_iterator<Solist, Value>
  104. {
  105. typedef flist_iterator<Solist, Value> base_type;
  106. typedef typename Solist::nodeptr_t nodeptr_t;
  107. using base_type::get_node_ptr;
  108. template <typename T, typename Allocator>
  109. friend class split_ordered_list;
  110. template<class M, typename V>
  111. friend class solist_iterator;
  112. template <typename Traits>
  113. friend class concurrent_unordered_base;
  114. template<typename M, typename T, typename U>
  115. friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
  116. template<typename M, typename T, typename U>
  117. friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
  118. const Solist *my_list_ptr;
  119. solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
  120. public:
  121. typedef typename Solist::value_type value_type;
  122. typedef typename Solist::difference_type difference_type;
  123. typedef typename Solist::pointer pointer;
  124. typedef typename Solist::reference reference;
  125. solist_iterator() {}
  126. solist_iterator( const solist_iterator<Solist, typename Solist::value_type> &other )
  127. : base_type(other), my_list_ptr(other.my_list_ptr) {}
  128. solist_iterator& operator=( const solist_iterator<Solist, typename Solist::value_type> &other ) {
  129. base_type::my_node_ptr = other.get_node_ptr();
  130. my_list_ptr = other.my_list_ptr;
  131. return *this;
  132. }
  133. reference operator*() const {
  134. return this->base_type::operator*();
  135. }
  136. pointer operator->() const {
  137. return (&**this);
  138. }
  139. solist_iterator& operator++() {
  140. do ++(*(base_type *)this);
  141. while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
  142. return (*this);
  143. }
  144. solist_iterator operator++(int) {
  145. solist_iterator tmp = *this;
  146. do ++*this;
  147. while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
  148. return (tmp);
  149. }
  150. };
  151. template<typename Solist, typename T, typename U>
  152. bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
  153. return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
  154. }
  155. template<typename Solist, typename T, typename U>
  156. bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
  157. return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
  158. }
  159. // Forward type and class definitions
  160. typedef size_t sokey_t;
// Forward list in which elements are sorted in a split-order.
//
// The list holds both real elements (odd order keys) and dummy bucket nodes
// (even order keys, see node::is_dummy()). Insertion is performed with a CAS
// on the predecessor's next pointer (try_insert_atomic), so concurrent
// insertions are supported; erasure relies on non-atomic relinking and a
// plain element-count decrement, so it assumes no concurrent mutators
// (see also the note inside insert_dummy()).
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;
    typedef typename tbb::internal::allocator_rebind<Allocator, T>::type allocator_type;
    struct node;
    typedef node *nodeptr_t;

    typedef typename tbb::internal::allocator_traits<allocator_type>::value_type value_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
    typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
    // No support for reference/const_reference in allocator traits
    typedef value_type& reference;
    typedef const value_type& const_reference;

    // Public iterators skip dummy nodes; "raw" iterators visit every node.
    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
    private:
        // for compilers that try to generate default constructors though they are not needed.
        node(); // VS 2008, 2010, 2012
    public:
        // Initialize the node with the given order key
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // storage() and value() are a common interface for access to the node's element (required by node_handle)
        value_type* storage() {
            return reinterpret_cast<value_type*>(&my_element);
        }

        value_type& value() {
            return *storage();
        }

        // Inserts the new element in the list in an atomic fashion.
        // Returns new_node on success, or the node that "interfered" on failure.
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);

            if (exchange_node == current_node) // TODO: why this branch?
            {
                // Operation succeeded, return the new node
                return new_node;
            }
            else
            {
                // Operation failed, return the "interfering" node
                return exchange_node;
            }
        }

        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        bool is_dummy() const {
            // Dummy nodes carry even order keys; real elements have the low bit set.
            return (my_order_key & 0x1) == 0;
        }

        nodeptr_t my_next;      // Next element in the list
        value_type my_element;  // Element storage (constructed only for real nodes)
        sokey_t my_order_key;   // Order key for this element
    };

    // Allocate a new node with the given order key; used to allocate dummy nodes.
    // No element is constructed in the node's storage.
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);
        pnode->init(order_key);
        return (pnode);
    }

    // Allocate a new node with the given order key and value
    template<typename Arg>
    nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t,
                          /*AllowCreate=*/tbb::internal::true_type=tbb::internal::true_type()){
        nodeptr_t pnode = my_node_allocator.allocate(1);

        //TODO: use RAII scoped guard instead of explicit catch
        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(tbb::internal::forward<Arg>(t));
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            // Element construction failed: return the raw memory and propagate.
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    // A helper to avoid excessive requirements in internal_insert
    template<typename Arg>
    nodeptr_t create_node(sokey_t, __TBB_FORWARDING_REF(Arg),
                          /*AllowCreate=*/tbb::internal::false_type){
        __TBB_ASSERT(false, "This compile-time helper should never get called");
        return nodeptr_t();
    }

    // Allocate a new node with the given parameters for constructing the value.
    // NOTE: init() is not called here - the caller is responsible for setting
    // the order key and next pointer of the returned node.
    template<typename __TBB_PARAMETER_PACK Args>
    nodeptr_t create_node_v( __TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args){
        nodeptr_t pnode = my_node_allocator.allocate(1);

        //TODO: use RAII scoped guard instead of explicit catch
        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward<Args>(args)));
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(sokey_t(0));
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;
        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    // Destroys every node except the head dummy and resets the element count.
    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        // Detach the chain from the head first, then walk and destroy it.
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns a first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns a first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        return (((const self_type *)this)->end());
    }

    // Checks if the number of elements (non-dummy) is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed in one.
    // NOTE(review): the allocators themselves are not swapped - presumably the
    // callers guarantee equal allocators; verify against concurrent_unordered_base.
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-order list functions

    // Returns a first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns a first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    // Like get_order_key(), but maps the end iterator to the maximal key value.
    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return ~sokey_t(0);
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator.
    // Only real nodes hold a constructed element, so the element destructor
    // is invoked for them alone; the raw memory is returned either way.
    void destroy_node(nodeptr_t pnode) {
        if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element in the list.
    // If insert fails, return the node that was inserted instead.
    static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        // Link the new node to the expected successor before publishing it via CAS.
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between passed in iterators.
    // On success *new_count receives the element count observed by the atomic
    // fetch_and_increment; on failure the returned iterator is end().
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count)
    {
        nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            check_range(it, next);
            *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment();
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            return std::pair<iterator, bool>(end(), false);
        }
    }

    // Insert a new dummy element, starting search at a parent dummy element
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // Advance past the parent dummy: candidates start at its successor.
        ++where;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range(it, where);
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as dummy node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    // Unlink the node at 'where' from the list, advancing 'where' past it.
    // 'previous' must designate the immediate predecessor of that node.
    nodeptr_t erase_node_impl(raw_iterator previous, raw_const_iterator& where) {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;

        return pnode;
    }

    // This erase function can handle both real and dummy nodes
    void erase_node(raw_iterator previous, raw_const_iterator& where,
                    /*allow_destroy*/tbb::internal::true_type)
    {
        nodeptr_t pnode = erase_node_impl(previous, where);
        destroy_node(pnode);
    }

    // Unlink only - the node is kept alive (used when its ownership is transferred).
    void erase_node(raw_iterator previous, raw_const_iterator& where,
                    /*allow_destroy*/tbb::internal::false_type)
    {
        erase_node_impl(previous, where);
    }

    void erase_node(raw_iterator previous, raw_const_iterator& where) {
        erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
    }

    // Erase the element (previous node needs to be passed because this is a forward only list)
    template<typename AllowDestroy>
    iterator erase_node(raw_iterator previous, const_iterator where, AllowDestroy)
    {
        raw_const_iterator it = where;
        erase_node(previous, it, AllowDestroy());
        // NOTE(review): plain, non-atomic decrement (unlike the atomic increment
        // in try_insert) - erase assumes no concurrent mutators.
        my_element_count--;

        return get_iterator(first_real_iterator(it));
    }

    iterator erase_node(raw_iterator previous, const_iterator& where) {
        return erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
    }

    // Move all elements from the passed in split-ordered list to this one.
    // Each source node is cloned (dummies recreated, real elements copy-constructed)
    // and appended here, then the source node is erased and destroyed.
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert_atomic(previous_node, dummy_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }

private:
    // Need to set up private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base
    template <typename Traits>
    friend class concurrent_unordered_base;

    // Check the list for order violations (debug builds only)
    void check_range( raw_iterator first, raw_iterator last )
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = first; it != last; ++it)
        {
            raw_iterator next = it;
            ++next;

            __TBB_ASSERT(next == raw_end() || get_order_key(next) >= get_order_key(it), "!!! List order inconsistency !!!");
        }
#else
        tbb::internal::suppress_unused_warning(first, last);
#endif
    }
    void check_range()
    {
#if TBB_USE_ASSERT
        check_range( raw_begin(), raw_end() );
#endif
    }

    typename tbb::internal::allocator_rebind<allocator_type, node>::type my_node_allocator; // allocator object for nodes

    size_type my_element_count; // Total item count, not counting dummy nodes
    nodeptr_t my_head;          // pointer to head node
};
  568. #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
  569. #pragma warning(push)
  570. #pragma warning(disable: 4127) // warning C4127: conditional expression is constant
  571. #endif
// Common implementation shared by concurrent_unordered_{map,multimap,set,multiset}.
// Traits supplies the key/value types, hash_compare, allocator, get_key(),
// and the allow_multimapping flag.
template <typename Traits>
class concurrent_unordered_base : public Traits
{
protected:
    // Type definitions
    typedef concurrent_unordered_base<Traits> self_type;
    typedef typename Traits::value_type value_type;
    typedef typename Traits::key_type key_type;
    typedef typename Traits::hash_compare hash_compare;
    typedef typename Traits::allocator_type allocator_type;
    typedef typename hash_compare::hasher hasher;
    typedef typename hash_compare::key_equal key_equal;
    typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
    typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
    typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
    // No support for reference/const_reference in allocator
    typedef typename allocator_type::value_type& reference;
    typedef const typename allocator_type::value_type& const_reference;
    // All elements live in one split-ordered list; buckets are cached
    // iterators (dummy nodes) into that list.
    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
    typedef typename solist_t::nodeptr_t nodeptr_t;
    // Iterators that walk the entire split-order list, including dummy nodes
    typedef typename solist_t::raw_iterator raw_iterator;
    typedef typename solist_t::raw_const_iterator raw_const_iterator;
    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
    typedef typename solist_t::const_iterator const_iterator;
    typedef iterator local_iterator;
    typedef const_iterator const_local_iterator;
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    typedef typename Traits::node_type node_type;
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
    using Traits::my_hash_compare;
    using Traits::get_key;
    using Traits::allow_multimapping;
    static const size_type initial_bucket_number = 8; // Initial number of buckets
private:
    template<typename OtherTraits>
    friend class concurrent_unordered_base;
    typedef std::pair<iterator, iterator> pairii_t;
    typedef std::pair<const_iterator, const_iterator> paircc_t;
    static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
    static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
  614. struct call_internal_clear_on_exit{
  615. concurrent_unordered_base* my_instance;
  616. call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
  617. void dismiss(){ my_instance = NULL;}
  618. ~call_internal_clear_on_exit(){
  619. if (my_instance){
  620. my_instance->internal_clear();
  621. }
  622. }
  623. };
protected:
    // Constructors/Destructors
    // Builds an empty table. The requested bucket count is rounded up to a
    // power of two (minimum 1 request is enforced).
    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
        : Traits(hc), my_solist(a),
          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
    {
        if( n_of_buckets == 0) ++n_of_buckets;
        my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
        internal_init();
    }
  635. concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
  636. : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
  637. {
  638. internal_init();
  639. internal_copy(right);
  640. }
  641. concurrent_unordered_base(const concurrent_unordered_base& right)
  642. : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
  643. {
  644. //FIXME:exception safety seems to be broken here
  645. internal_init();
  646. internal_copy(right);
  647. }
  648. #if __TBB_CPP11_RVALUE_REF_PRESENT
    // Move constructor: builds a default-sized empty table, then steals the
    // contents of 'right' via swap; 'right' is left valid and empty.
    concurrent_unordered_base(concurrent_unordered_base&& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()),
          my_maximum_bucket_size(float(initial_bucket_load))
    {
        my_number_of_buckets = initial_bucket_number;
        internal_init();
        swap(right);
    }
    // Move constructor with an explicit allocator. If 'a' equals the source
    // allocator the contents are swapped in (O(1)); otherwise every node,
    // including the dummy bucket heads, is moved one-by-one into storage
    // obtained from 'a'.
    concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        // Releases bucket segments if anything below throws (the destructor
        // would not run after a throwing constructor).
        call_internal_clear_on_exit clear_buckets_on_exception(this);
        internal_init();
        if (a == right.get_allocator()){
            // Equal allocators: cheap steal via swap.
            my_number_of_buckets = initial_bucket_number;
            my_maximum_bucket_size = float(initial_bucket_load);
            this->swap(right);
        }else{
            // Unequal allocators: nodes must be re-allocated with 'a'.
            my_maximum_bucket_size = right.my_maximum_bucket_size;
            my_number_of_buckets = right.my_number_of_buckets;
            my_solist.my_element_count = right.my_solist.my_element_count;
            if (! right.my_solist.empty()){
                nodeptr_t previous_node = my_solist.my_head;
                // Move all elements one by one, including dummy ones
                for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)
                {
                    const nodeptr_t pnode = it.get_node_ptr();
                    nodeptr_t node;
                    if (pnode->is_dummy()) {
                        // Dummy node: recreate it and register it as its bucket's head.
                        node = my_solist.create_node(pnode->get_order_key());
                        size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;
                        set_bucket(bucket, node);
                    }else{
                        // Real element: move-construct the payload into the new node.
                        node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
                    }
                    previous_node = my_solist.try_insert_atomic(previous_node, node, NULL);
                    __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?");
                }
                my_solist.check_range();
            }
        }
        clear_buckets_on_exception.dismiss();
    }
  692. #endif // __TBB_CPP11_RVALUE_REF_PRESENT
  693. concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
  694. if (this != &right)
  695. internal_copy(right);
  696. return (*this);
  697. }
  698. #if __TBB_CPP11_RVALUE_REF_PRESENT
    // Move assignment. If the allocator propagates on move assignment (POCMA)
    // or both allocators compare equal, the contents can be taken over
    // wholesale; otherwise the elements are moved node-by-node through a
    // temporary built with this container's allocator.
    concurrent_unordered_base& operator=(concurrent_unordered_base&& other)
    {
        if(this != &other){
            typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;
            if(pocma_t::value || this->my_allocator == other.my_allocator) {
                // 'trash' takes over the current contents and destroys them
                // when it goes out of scope.
                concurrent_unordered_base trash (std::move(*this));
                swap(other);
                if (pocma_t::value) {
                    using std::swap;
                    //TODO: swapping allocators here may be a problem, replace with single direction moving
                    swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);
                    swap(this->my_allocator, other.my_allocator);
                }
            } else {
                // Unequal, non-propagating allocators: element-wise move.
                concurrent_unordered_base moved_copy(std::move(other),this->my_allocator);
                this->swap(moved_copy);
            }
        }
        return *this;
    }
  719. #endif // __TBB_CPP11_RVALUE_REF_PRESENT
#if __TBB_INITIALIZER_LISTS_PRESENT
    //! assignment operator from initializer_list
    // Equivalent to clear() followed by a range insert of the list contents.
    concurrent_unordered_base& operator=(std::initializer_list<value_type> il)
    {
        this->clear();
        this->insert(il.begin(),il.end());
        return (*this);
    }
#endif // __TBB_INITIALIZER_LISTS_PRESENT
    // Releases the bucket segment arrays.
    ~concurrent_unordered_base() {
        // Delete all node segments
        internal_clear();
    }
  733. #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    // Splices nodes from 'source' into *this: always for multi-containers,
    // otherwise only when the key is not already present. A node whose
    // insertion fails (e.g. lost a race to an equal key) is re-linked back
    // into 'source' at its original position.
    template<typename SourceType>
    void internal_merge(SourceType& source) {
        typedef typename SourceType::iterator source_iterator;
        __TBB_STATIC_ASSERT((tbb::internal::is_same_type<node_type,
                            typename SourceType::node_type>::value),
                            "Incompatible containers cannot be merged");
        for(source_iterator it = source.begin(); it != source.end();) {
            // Advance before 'where' may be unlinked from 'source'.
            source_iterator where = it++;
            if (allow_multimapping || find(get_key(*where)) == end()) {
                std::pair<node_type, raw_iterator> extract_result = source.internal_extract(where);
                // Remember the old order key
                sokey_t old_order_key = extract_result.first.my_node->get_order_key();
                // If the insertion fails, it returns ownership of the node to extract_result.first
                // extract_result.first remains valid node handle
                if (!insert(std::move(extract_result.first)).second) {
                    raw_iterator next = extract_result.second;
                    raw_iterator current = next++;
                    // Revert order key to old value
                    extract_result.first.my_node->init(old_order_key);
                    __TBB_ASSERT(extract_result.first.my_node->get_order_key() >= current.get_node_ptr()->get_order_key(),
                                 "Wrong nodes order in source container");
                    __TBB_ASSERT(next==source.my_solist.raw_end() ||
                                 extract_result.first.my_node->get_order_key() <= next.get_node_ptr()->get_order_key(),
                                 "Wrong nodes order in source container");
                    size_t new_count = 0;// To use try_insert()
                    bool insert_result =
                        source.my_solist.try_insert(current, next, extract_result.first.my_node, &new_count).second;
                    __TBB_ASSERT_EX(insert_result, "Return to source must be successful. "
                                    "Changing source container while merging is unsafe.");
                }
                // Ownership now rests with the target (or back with source);
                // the handle must not destroy the node.
                extract_result.first.deactivate();
            }
        }
    }
  768. #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
public:
    allocator_type get_allocator() const {
        return my_solist.get_allocator();
    }
    // Size and capacity function
    // All of these delegate to the underlying split-ordered list.
    bool empty() const {
        return my_solist.empty();
    }
    size_type size() const {
        return my_solist.size();
    }
    size_type max_size() const {
        return my_solist.max_size();
    }
    // Iterators
    // The list iterators skip dummy bucket nodes, so these traverse
    // elements only.
    iterator begin() {
        return my_solist.begin();
    }
    const_iterator begin() const {
        return my_solist.begin();
    }
    iterator end() {
        return my_solist.end();
    }
    const_iterator end() const {
        return my_solist.end();
    }
    const_iterator cbegin() const {
        return my_solist.cbegin();
    }
    const_iterator cend() const {
        return my_solist.cend();
    }
    // Parallel traversal support
    //! Read-only TBB range over the container, divisible for parallel_for.
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;
        raw_const_iterator my_end_node;
        mutable raw_const_iterator my_midpoint_node; // cached split point
    public:
        //! Type for size of a range
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;
        //! True if range is empty.
        bool empty() const {return my_begin_node == my_end_node;}
        //! True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        //! Split range.
        const_range_type( const_range_type &r, split ) :
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        //! Init range with container and grainsize specified
        const_range_type( const concurrent_unordered_base &a_table ) :
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }
        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        //! The grain size for this range.
        size_type grainsize() const { return 1; }
        //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                // Pick the bucket whose reversed order key lies halfway
                // between the begin and end keys, then walk up to its nearest
                // initialized ancestor and use that bucket's dummy node.
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                if(__TBB_ReverseBits(mid_bucket) > begin_key) {
                    // found a dummy_node between begin and end
                    my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
                }
                else {
                    // didn't find a dummy node between begin and end.
                    my_midpoint_node = my_end_node;
                }
#if TBB_USE_ASSERT
                {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };
    //! Mutable counterpart of const_range_type; only the iterator type differs.
    class range_type : public const_range_type {
    public:
        typedef typename concurrent_unordered_base::iterator iterator;
        //! Split range.
        range_type( range_type &r, split ) : const_range_type( r, split() ) {}
        //! Init range with container and grainsize specified
        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
    };
    //! Ranges over the whole container for use with parallel algorithms.
    range_type range() {
        return range_type( *this );
    }
    const_range_type range() const {
        return const_range_type( *this );
    }
    // Modifiers
    // Copy-inserts 'value'; returns (iterator to element, whether inserted).
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert</*AllowCreate=*/tbb::internal::true_type,
                               /*AllowDestroy=*/tbb::internal::true_type>(value);
    }
    iterator insert(const_iterator, const value_type& value) {
        // Ignore hint
        return insert(value).first;
    }
#if __TBB_CPP11_RVALUE_REF_PRESENT
    std::pair<iterator, bool> insert(value_type&& value) {
        return internal_insert</*AllowCreate=*/tbb::internal::true_type,
                               /*AllowDestroy=*/tbb::internal::true_type>(std::move(value));
    }
    iterator insert(const_iterator, value_type&& value) {
        // Ignore hint
        return insert(std::move(value)).first;
    }
#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    // Inserts a node handle obtained from extract(): on success the handle
    // is emptied; on failure it keeps ownership of the node.
    std::pair<iterator, bool> insert(node_type&& nh) {
        if (!nh.empty()) {
            nodeptr_t handled_node = nh.my_node;
            std::pair<iterator, bool> insert_result =
                internal_insert</*AllowCreate=*/tbb::internal::false_type,
                                /*AllowDestroy=*/tbb::internal::false_type>
                                (handled_node->my_element, handled_node);
            if (insert_result.second)
                nh.deactivate();
            return insert_result;
        }
        return std::pair<iterator, bool>(end(), false);
    }
    iterator insert(const_iterator, node_type&& nh) {
        return insert(std::move(nh)).first;
    }
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
    // Builds the element up front, then inserts the node; a duplicate causes
    // the freshly built node to be destroyed (AllowDestroy = true).
    template<typename... Args>
    std::pair<iterator, bool> emplace(Args&&... args) {
        nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward<Args>(args)...);
        return internal_insert</*AllowCreate=*/tbb::internal::false_type,
                               /*AllowDestroy=*/tbb::internal::true_type>(pnode->my_element, pnode);
    }
    template<typename... Args>
    iterator emplace_hint(const_iterator, Args&&... args) {
        // Ignore hint
        return emplace(tbb::internal::forward<Args>(args)...).first;
    }
  934. #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
  935. template<class Iterator>
  936. void insert(Iterator first, Iterator last) {
  937. for (Iterator it = first; it != last; ++it)
  938. insert(*it);
  939. }
#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Insert initializer list
    // Delegates to the iterator-range overload.
    void insert(std::initializer_list<value_type> il) {
        insert(il.begin(), il.end());
    }
#endif
    // Erases the element at 'where'. Not safe to run concurrently with other
    // operations; returns the iterator following the erased element.
    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }
    iterator unsafe_erase(const_iterator first, const_iterator last) {
        while (first != last)
            unsafe_erase(first++); // post-increment keeps 'first' valid across the erase
        return my_solist.get_iterator(first);
    }
    // Erases all elements matching 'key'; returns the number removed.
    size_type unsafe_erase(const key_type& key) {
        pairii_t where = equal_range(key);
        size_type item_count = internal_distance(where.first, where.second);
        unsafe_erase(where.first, where.second);
        return item_count;
    }
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
    // Unlinks an element and returns it as a node handle (C++17 style).
    node_type unsafe_extract(const_iterator where) {
        return internal_extract(where).first;
    }
    node_type unsafe_extract(const key_type& key) {
        pairii_t where = equal_range(key);
        if (where.first == end()) return node_type(); // element was not found
        return internal_extract(where.first).first;
    }
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
    // Exchanges the full state (comparator, list, buckets, sizing policy)
    // with 'right'. Not concurrency-safe.
    void swap(concurrent_unordered_base& right) {
        if (this != &right) {
            std::swap(my_hash_compare, right.my_hash_compare);
            my_solist.swap(right.my_solist);
            internal_swap_buckets(right);
            std::swap(my_number_of_buckets, right.my_number_of_buckets);
            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
        }
    }
    // Observers
    hasher hash_function() const {
        return my_hash_compare.my_hash_object;
    }
    key_equal key_eq() const {
        return my_hash_compare.my_key_compare_object;
    }
    // Removes all elements and resets the table to a single initialized
    // bucket (bucket 0). Not concurrency-safe.
    void clear() {
        // Clear list
        my_solist.clear();
        // Clear buckets
        internal_clear();
        // Initialize bucket 0
        __TBB_ASSERT(my_buckets[0] == NULL, NULL);
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }
    // Lookup
    iterator find(const key_type& key) {
        return internal_find(key);
    }
    const_iterator find(const key_type& key) const {
        // internal_find does not modify the container, so const_cast is safe.
        return const_cast<self_type*>(this)->internal_find(key);
    }
    // Number of elements matching 'key' (0 or 1 unless multimapping).
    size_type count(const key_type& key) const {
        if(allow_multimapping) {
            paircc_t answer = equal_range(key);
            size_type item_count = internal_distance(answer.first, answer.second);
            return item_count;
        } else {
            return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
        }
    }
    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }
    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_equal_range(key);
    }
    // Bucket interface - for debugging
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }
    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }
    // Counts the non-dummy nodes in one bucket; O(bucket size).
    size_type unsafe_bucket_size(size_type bucket) {
        size_type item_count = 0;
        if (is_initialized(bucket)) {
            raw_iterator it = get_bucket(bucket);
            ++it; // skip the bucket's dummy head node
            for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
                ++item_count;
        }
        return item_count;
    }
    // Bucket index that 'key' maps to.
    size_type unsafe_bucket(const key_type& key) const {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        return bucket;
    }
    // Per-bucket (local) iterators for debugging/inspection.
    // If the bucket is initialized, return a first non-dummy element in it
    local_iterator unsafe_begin(size_type bucket) {
        if (!is_initialized(bucket))
            return end();
        raw_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }
    // If the bucket is initialized, return a first non-dummy element in it
    const_local_iterator unsafe_begin(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();
        raw_const_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }
    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    local_iterator unsafe_end(size_type bucket)
    {
        if (!is_initialized(bucket))
            return end();
        raw_iterator it = get_bucket(bucket);
        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }
    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    const_local_iterator unsafe_end(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();
        raw_const_iterator it = get_bucket(bucket);
        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }
    const_local_iterator unsafe_cbegin(size_type bucket) const {
        return ((const self_type *) this)->unsafe_begin(bucket);
    }
    const_local_iterator unsafe_cend(size_type bucket) const {
        return ((const self_type *) this)->unsafe_end(bucket);
    }
    // Hash policy
    // Average number of elements per bucket.
    float load_factor() const {
        return (float) size() / (float) unsafe_bucket_count();
    }
    float max_load_factor() const {
        return my_maximum_bucket_size;
    }
  1094. void max_load_factor(float newmax) {
  1095. if (newmax != newmax || newmax < 0)
  1096. tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
  1097. my_maximum_bucket_size = newmax;
  1098. }
    // This function is a noop, because the underlying split-ordered list
    // is already sorted, so an increase in the bucket number will be
    // reflected next time this bucket is touched.
    void rehash(size_type buckets) {
        size_type current_buckets = my_number_of_buckets;
        if (current_buckets >= buckets)
            return; // the table never shrinks
        my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
    }
private:
    // Initialize the hash and keep the first bucket open
    void internal_init() {
        // Initialize the array of segment pointers
        memset(my_buckets, 0, sizeof(my_buckets));
        // Initialize bucket 0
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }
    // Destroys and deallocates every bucket segment. The element list itself
    // is not touched here; callers clear it separately.
    void internal_clear() {
        for (size_type index = 0; index < pointers_per_table; ++index) {
            if (my_buckets[index] != NULL) {
                size_type sz = segment_size(index);
                for (size_type index2 = 0; index2 < sz; ++index2)
                    my_allocator.destroy(&my_buckets[index][index2]);
                my_allocator.deallocate(my_buckets[index], sz);
                my_buckets[index] = 0;
            }
        }
    }
    // Replaces the contents with a copy of 'right'. If an insert throws, the
    // element list is cleared and the exception is rethrown.
    void internal_copy(const self_type& right) {
        clear();
        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;
        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }
    // Exchanges the bucket segment pointers with 'right', slot by slot
    // (the slots are atomics — see the compare_and_swap in set_bucket —
    // so a plain std::swap of the array is not used).
    void internal_swap_buckets(concurrent_unordered_base& right)
    {
        // Swap all node segments
        for (size_type index = 0; index < pointers_per_table; ++index)
        {
            raw_iterator * iterator_pointer = my_buckets[index];
            my_buckets[index] = right.my_buckets[index];
            right.my_buckets[index] = iterator_pointer;
        }
    }
  1150. //TODO: why not use std::distance?
  1151. // Hash APIs
  1152. static size_type internal_distance(const_iterator first, const_iterator last)
  1153. {
  1154. size_type num = 0;
  1155. for (const_iterator it = first; it != last; ++it)
  1156. ++num;
  1157. return num;
  1158. }
    // Insert an element in the hash given its value.
    // AllowCreate: build the node from 'value' lazily at the insertion point;
    // AllowDestroy: a pre-built node ('pnode') may be destroyed if a
    // duplicate is found. Returns (iterator, true) on success, or an
    // iterator to the existing equal element and false otherwise.
    template<typename AllowCreate, typename AllowDestroy, typename ValueType>
    std::pair<iterator, bool> internal_insert(__TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL)
    {
        const key_type *pkey = &get_key(value);
        sokey_t hash_key = (sokey_t) my_hash_compare(*pkey);
        size_type new_count = 0;
        sokey_t order_key = split_order_key_regular(hash_key);
        raw_iterator previous = prepare_bucket(hash_key);
        raw_iterator last = my_solist.raw_end();
        __TBB_ASSERT(previous != last, "Invalid head node");
        if (pnode) {
            // Set new order_key to node
            pnode->init(order_key);
        }
        // First node is a dummy node
        for (raw_iterator where = previous;;)
        {
            ++where;
            if (where == last || solist_t::get_order_key(where) > order_key ||
                // if multimapped, stop at the first item equal to us.
                (allow_multimapping && solist_t::get_order_key(where) == order_key &&
                 !my_hash_compare(get_key(*where), *pkey))) // TODO: fix negation
            {
                if (!pnode) {
                    pnode = my_solist.create_node(order_key, tbb::internal::forward<ValueType>(value), AllowCreate());
                    // If the value was moved, the known reference to key might be invalid
                    pkey = &get_key(pnode->my_element);
                }
                // Try to insert 'pnode' between 'previous' and 'where'
                std::pair<iterator, bool> result = my_solist.try_insert(previous, where, pnode, &new_count);
                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = previous;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key &&
                     !my_hash_compare(get_key(*where), *pkey)) // TODO: fix negation
            { // Element already in the list, return it
                if (pnode && AllowDestroy::value)
                    my_solist.destroy_node(pnode);
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }
            // Move the iterator forward
            previous = where;
        }
    }
    // Find the element in the split-ordered list.
    // Starts at the key's bucket dummy node and scans forward; the list is
    // sorted by order key, so the scan stops once keys exceed the target.
    iterator internal_find(const key_type& key)
    {
        sokey_t hash_key = (sokey_t) my_hash_compare(key);
        sokey_t order_key = split_order_key_regular(hash_key);
        raw_iterator last = my_solist.raw_end();
        for (raw_iterator it = prepare_bucket(hash_key); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // If the order key is smaller than the current order key, the element
                // is not in the hash.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key function comparison has to be performed to check whether this is the
                // right element. If not, keep searching while order key is the same.
                if (!my_hash_compare(get_key(*it), key)) // TODO: fix negation
                    return my_solist.get_iterator(it);
            }
        }
        return end();
    }
    // Erase an element from the list. This is not a concurrency safe function.
    // Finds the predecessor of 'it' by scanning forward from the bucket's
    // dummy head, then unlinks and destroys the node.
    iterator internal_erase(const_iterator it)
    {
        sokey_t hash_key = (sokey_t) my_hash_compare(get_key(*it));
        raw_iterator previous = prepare_bucket(hash_key);
        raw_iterator last = my_solist.raw_end();
        __TBB_ASSERT(previous != last, "Invalid head node");
        // First node is a dummy node
        for (raw_iterator where = previous; where != last; previous = where) {
            ++where;
            if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);
        }
        return end();
    }
  1258. #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
  1259. std::pair<node_type, raw_iterator> internal_extract(const_iterator it) {
  1260. sokey_t hash_key = sokey_t(my_hash_compare(get_key(*it)));
  1261. raw_iterator previous = prepare_bucket(hash_key);
  1262. raw_iterator last = my_solist.raw_end();
  1263. __TBB_ASSERT(previous != last, "Invalid head node");
  1264. for(raw_iterator where = previous; where != last; previous = where) {
  1265. ++where;
  1266. if (my_solist.get_iterator(where) == it) {
  1267. const_iterator result = it;
  1268. my_solist.erase_node(previous, it, /*allow_destroy*/tbb::internal::false_type());
  1269. return std::pair<node_type, raw_iterator>( node_type(result.get_node_ptr()),
  1270. previous);
  1271. }
  1272. }
  1273. return std::pair<node_type, iterator>(node_type(), end());
  1274. }
  1275. #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t hash_key = (sokey_t) my_hash_compare(key);
        sokey_t order_key = split_order_key_regular(hash_key);
        raw_iterator end_it = my_solist.raw_end();
        for (raw_iterator it = prepare_bucket(hash_key); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key &&
                     !my_hash_compare(get_key(*it), key)) // TODO: fix negation; also below
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;
                // Equal elements are adjacent in the split-ordered list, so
                // extend 'last' while the key still matches.
                do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
                return pairii_t(first, last);
            }
        }
        return pairii_t(end(), end());
    }
    // Bucket APIs
    // Lazily creates the dummy head node for 'bucket'. The parent bucket
    // (most significant set bit cleared) is initialized first, recursively,
    // and the new dummy is spliced in after the parent's dummy.
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent.
        __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");
        size_type parent_bucket = get_parent(bucket);
        // All parent_bucket buckets have to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);
        raw_iterator parent = get_bucket(parent_bucket);
        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }
    // Doubles the bucket count when the observed load factor exceeds the
    // configured maximum; a CAS makes the growth race-free.
    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
            // Double the size of the hash only if size has not changed in between loads
            my_number_of_buckets.compare_and_swap(2u*current_size, current_size);
            //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
            //due to overzealous compiler warnings in /Wp64 mode
        }
    }
    // Parent bucket in the split-ordering scheme.
    size_type get_parent(size_type bucket) const
    {
        // Unsets bucket's most significant turned-on bit
        size_type msb = __TBB_Log2((uintptr_t)bucket);
        return bucket & ~(size_type(1) << msb);
    }
    // Dynamic sized array (segments)
    //! @return segment index of given index in the array
    static size_type segment_index_of( size_type index ) {
        // |1 keeps the argument nonzero so Log2 is well defined for index 0.
        return size_type( __TBB_Log2( uintptr_t(index|1) ) );
    }
    //! @return the first array index of given segment
    static size_type segment_base( size_type k ) {
        return (size_type(1)<<k & ~size_type(1));
    }
    //! @return segment size
    static size_type segment_size( size_type k ) {
        // Segment 0 holds two buckets; segment k>0 holds 2^k buckets.
        return k? size_type(1)<<k : 2;
    }
  1345. raw_iterator get_bucket(size_type bucket) const {
  1346. size_type segment = segment_index_of(bucket);
  1347. bucket -= segment_base(segment);
  1348. __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
  1349. return my_buckets[segment][bucket];
  1350. }
  1351. raw_iterator prepare_bucket(sokey_t hash_key) {
  1352. size_type bucket = hash_key % my_number_of_buckets;
  1353. size_type segment = segment_index_of(bucket);
  1354. size_type index = bucket - segment_base(segment);
  1355. if (my_buckets[segment] == NULL || my_buckets[segment][index].get_node_ptr() == NULL)
  1356. init_bucket(bucket);
  1357. return my_buckets[segment][index];
  1358. }
//! Publish dummy_head as the bucket's head marker, allocating the enclosing
//! segment on first use. Safe for concurrent callers racing on the same segment.
void set_bucket(size_type bucket, raw_iterator dummy_head) {
    size_type segment = segment_index_of(bucket);
    bucket -= segment_base(segment); // now the index within the segment
    if (my_buckets[segment] == NULL) {
        size_type sz = segment_size(segment);
        raw_iterator * new_segment = my_allocator.allocate(sz);
        // Zero-fill before publication: a null node pointer is the sentinel
        // is_initialized()/prepare_bucket() use for "bucket not yet created".
        std::memset(static_cast<void*>(new_segment), 0, sz*sizeof(raw_iterator));
        // Race to install the segment; the loser frees its own allocation and
        // falls through to use the winner's segment.
        if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
            my_allocator.deallocate(new_segment, sz);
    }
    my_buckets[segment][bucket] = dummy_head;
}
  1371. bool is_initialized(size_type bucket) const {
  1372. size_type segment = segment_index_of(bucket);
  1373. bucket -= segment_base(segment);
  1374. if (my_buckets[segment] == NULL)
  1375. return false;
  1376. raw_iterator it = my_buckets[segment][bucket];
  1377. return (it.get_node_ptr() != NULL);
  1378. }
  1379. // Utilities for keys
  1380. // A regular order key has its original hash value reversed and the last bit set
  1381. sokey_t split_order_key_regular(sokey_t order_key) const {
  1382. return __TBB_ReverseBits(order_key) | 0x1;
  1383. }
  1384. // A dummy order key has its original hash value reversed and the last bit unset
  1385. sokey_t split_order_key_dummy(sokey_t order_key) const {
  1386. return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
  1387. }
// Shared variables
atomic<size_type> my_number_of_buckets; // Current table size (number of buckets); grown by CAS in adjust_table_size
solist_t my_solist; // Split-ordered list where all the elements (and bucket dummy nodes) are kept
typename tbb::internal::allocator_rebind<allocator_type, raw_iterator>::type my_allocator; // Allocator object for segments
float my_maximum_bucket_size; // Maximum allowed load factor (elements per bucket) before the table is doubled
atomic<raw_iterator*> my_buckets[pointers_per_table]; // The segment table: lazily allocated arrays of bucket dummy-node iterators
  1394. };
  1395. #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
  1396. #pragma warning(pop) // warning 4127 is back
  1397. #endif
  1398. } // namespace internal
  1399. //! @endcond
  1400. } // namespace interface5
  1401. } // namespace tbb
  1402. #endif // __TBB__concurrent_unordered_impl_H