// Map.cpp
  1. /******************************************************************************/
  2. #include "stdafx.h"
  3. namespace EE{
  4. /******************************************************************************
  5. We must unlock 'D._lock' everywhere before we want to lock 'T._lock'
  6. This is to avoid deadlocks, when one thread locks first 'D._lock' and then 'T._lock'
  7. and another thread locks first 'T._lock' and then 'D._lock'
  8. For example:
  9. -background loading thread calls (Images._lock, D._lock.on)
  10. -inside 'Draw' during drawing we load an image (D._lock.on, Images._lock) -> deadlock
  11. Instead of it, during drawing it will be: D._lock.on, D._lock.off, Images.lock and unlocks...
We have to do this even if we're not going to perform any 'D' operations:
  13. -background loading thread calls (Images._lock, D._lock.on)
  14. -during 'Draw' drawing (where D is already locked) we use 'find' (D._lock.on, Images._lock) -> deadlock
  15. /******************************************************************************/
typedef Map<Int, Int> MapInt; ASSERT(OFFSET(MapInt::Elm, data)==0); // compile-time check that element 'data' lives at offset 0 inside 'Elm' - '_data_offset' is not stored in the class because it's assumed to be always 0
  17. /******************************************************************************/
  18. _Map::_Map(Int block_elms, Int compare(CPtr key_a, CPtr key_b), Bool create(Ptr data, CPtr key, Ptr user), Ptr user, void (&copy_key)(Ptr dest, CPtr src)) : _memx(block_elms)
  19. {
  20. _elms=0;
  21. _key_offset=/*_data_offset=*/_desc_offset=_data_size=0;
  22. _mode=MAP_EXIT;
  23. _order=null;
  24. _user =user;
  25. _compare =compare;
  26. _create =create;
  27. _copy_key=copy_key;
  28. }
  29. _MapTS::_MapTS(Int block_elms, Int compare(CPtr key_a, CPtr key_b), Bool create(Ptr data, CPtr key, Ptr user), Ptr user, void (&copy_key)(Ptr dest, CPtr src)) : _Map(block_elms, compare, create, user, copy_key)
  30. {
  31. _d_lock=0;
  32. }
  33. void _Map::clear() {_memx.clear(); _elms=0; } // here can't Free '_order' because it assumes that its size is '_memx.maxElms()'
  34. void _Map::del () {_memx.del (); _elms=0; Free(_order);}
  35. void _MapTS::clear() {SyncUnlocker unlocker(D._lock); SyncLocker locker(_lock); super::clear();}
  36. void _MapTS::del () {SyncUnlocker unlocker(D._lock); SyncLocker locker(_lock); super::del ();}
  37. Byte _Map::mode(Byte mode) {Byte old_mode=T._mode; T._mode=mode; return old_mode;}
  38. CPtr _Map::key (Int i)C {return elmKey (*_order[i]);}
  39. Ptr _Map::operator[](Int i) {return elmData(*_order[i]);}
  40. CPtr _Map::absKey (Int abs_i)C {return elmKey (_memx.absElm(abs_i));}
  41. CPtr _Map::absData(Int abs_i)C {return elmData(_memx.absElm(abs_i));}
  42. INLINE Int _Map::dataInMapToAbsIndex(CPtr data)C {return data ? _memx.absIndexFastUnsafeValid(dataElm(data)) : -1;}
  43. /******************************************************************************/
// Acquire this map's '_lock', temporarily releasing 'D._lock' first if we own it,
// to preserve the lock ordering described at the top of this file.
// The number of 'D._lock' releases is remembered in '_d_lock' so 'unlock' can restore them.
void _MapTS::lock()C
{
Byte d_locked=0;
if(!_lock.owned()) // if we don't have the '_lock' yet (only the first acquisition needs to drop 'D._lock' - recursive calls already did it)
{
#if SYNC_UNLOCK_SINGLE
   if(D._lock.owned()){d_locked=true; D._lock.off();} // if we own the 'D._lock' then disable it and remember that we had it
#else
   for(; D._lock.owned(); d_locked++)D._lock.off(); // if we own the 'D._lock' then disable it and remember that we had it (count every recursive level)
#endif
}
_lock.on();
if(d_locked)_d_lock=d_locked; // if we've disabled 'D._lock' then set how many times; written only after '_lock' is held so the value is protected by it
}
// Release one level of this map's '_lock'; once fully released, re-acquire
// 'D._lock' as many times as 'lock' had released it.
void _MapTS::unlock()C
{
Byte d_locked=_d_lock; // remember if we had 'D._lock' disabled (read while still holding '_lock')
_d_lock =0; // disable before releasing '_lock' - after the release another thread may take '_lock' and overwrite '_d_lock'
_lock.off();
if(!_lock.owned()) // if we no longer own '_lock' then we can restore 'D._lock' if we disabled it
{
#if SYNC_UNLOCK_SINGLE
   if(d_locked)D._lock.on();
#else
   REP(d_locked)D._lock.on(); // restore every recursive level that was released
#endif
}else // if we still own '_lock' (recursive lock) then we should keep the variable about 'D._lock' for the outermost 'unlock'
{
   _d_lock=d_locked;
}
}
  75. /******************************************************************************/
  76. _Map::Elm* _Map::findElm(CPtr key, Int &stop)C
  77. {
  78. Int l=0, r=_elms; for(; l<r; )
  79. {
  80. Int mid =UInt(l+r)/2,
  81. compare=T._compare(key, elmKey(*_order[mid]));
  82. if(!compare)
  83. {
  84. stop=mid;
  85. return _order[mid];
  86. }
  87. if(compare<0)r=mid;
  88. else l=mid+1;
  89. }
  90. stop=l;
  91. return null;
  92. }
  93. /******************************************************************************/
// Insert 'elm' into the sorted '_order' array at 'index' (capacity is guaranteed by the caller).
void _Map::addToOrder(Elm &elm, Int index)
{
MoveFastN(_order+index+1, _order+index, _elms-index); // shift tail right; faster version of: for(Int i=_elms; i>index; i--)_order[i]=_order[i-1];
_order[index]=&elm; _elms++;
}
// Remove the 'index'-th entry from the sorted '_order' array (does not free the element itself).
void _Map::removeFromOrder(Int index)
{
_elms--;
MoveFastN(_order+index, _order+index+1, _elms-index); // shift tail left; faster version of: for(Int i=index; i<_elms; i++)_order[i]=_order[i+1];
}
  104. /******************************************************************************/
  105. Int _Map::findAbsIndex(CPtr key)C {return dataInMapToAbsIndex(find(key));}
  106. Ptr _Map::find (CPtr key)C
  107. {
  108. Int i; if(Elm *elm=findElm(key, i))if(!(elmDesc(*elm).flag&MAP_ELM_LOADING))return elmData(*elm);
  109. return null;
  110. }
  111. Ptr _MapTS::find(CPtr key)C
  112. {
  113. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  114. SyncLocker locker( _lock);
  115. return super::find(key);
  116. }
  117. Int _MapTS::findAbsIndex(CPtr key)C
  118. {
  119. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  120. SyncLocker locker( _lock);
  121. return super::findAbsIndex(key);
  122. }
  123. /******************************************************************************/
Int _Map::getAbsIndex(CPtr key) {return dataInMapToAbsIndex(get(key));}
// Find existing data for 'key', or create a new element according to '_mode':
//   MAP_ALL_NULL  - never create, return null
//   MAP_ALL_DUMMY - always create an empty (dummy) element without calling '_create'
//   MAP_DUMMY     - call '_create', on failure keep the element as a dummy
//   default       - call '_create', on failure remove the element and return null
Ptr _Map::get (CPtr key)
{
Int stop;
// find existing (elements mid-load are treated as absent)
if(Elm *elm=findElm(key, stop))return (elmDesc(*elm).flag&MAP_ELM_LOADING) ? null : elmData(*elm);
// always null
if(_mode==MAP_ALL_NULL)return null;
// new element ('stop' from the failed 'findElm' is the sorted insertion position)
Int max_elms =_memx.maxElms(); Elm &elm=_memx.New(); Desc &desc=elmDesc(elm); Ptr data=elmData(elm), elm_key=elmKey(elm); desc.flag=0;
if( max_elms!=_memx.maxElms())Realloc(_order, _memx.maxElms(), max_elms); // keep '_order' capacity equal to '_memx.maxElms()'
// always dummy
if(_mode==MAP_ALL_DUMMY)
{
dummy:
   desc.flag|=MAP_ELM_DUMMY;
   _copy_key(elm_key, key);
   addToOrder(elm, stop);
   return data;
}
// create
{
   desc.flag|=MAP_ELM_LOADING; // mark as loading so concurrent lookups skip this element
   _copy_key(elm_key, key); // copy key before creating
   addToOrder(elm, stop); // insert before calling '_create' so recursive 'get' calls can see it
   // try to load
   if(_create ? _create(data, elm_key, _user) : true) // no '_create' callback means plain success
   {
      FlagDisable(desc.flag, MAP_ELM_LOADING);
      return data;
   }
   // creation failed - unlink from the order (if still present)
   if(findElm(key, stop))
   {
      removeFromOrder(stop); // don't try to optimize by using the same 'stop' calculated from 'addToOrder' as it may have changed, because inside 'create' there could be other objects created
      FlagDisable(desc.flag, MAP_ELM_LOADING);
   }
}
// dummy on fail (jumps back to reinsert the element as a dummy)
if(_mode==MAP_DUMMY)goto dummy;
// null - release the failed element
_memx.removeData(&elm);
return null;
}
  167. Ptr _MapTS::get(CPtr key)
  168. {
  169. SyncUnlocker unlocker(D._lock);
  170. SyncLocker locker( _lock);
  171. return super::get(key);
  172. }
  173. Int _MapTS::getAbsIndex(CPtr key)
  174. {
  175. SyncUnlocker unlocker(D._lock);
  176. SyncLocker locker( _lock);
  177. return super::getAbsIndex(key);
  178. }
  179. /******************************************************************************/
  180. void _Map::getFailed()C
  181. {
  182. if(_mode==MAP_EXIT)Exit("Can't create object in 'Map' from key");
  183. }
  184. Ptr _Map ::operator() (CPtr key) { if(Ptr data=get (key) )return data; getFailed(); return null;}
  185. Ptr _MapTS::operator() (CPtr key) { if(Ptr data=get (key) )return data; getFailed(); return null;}
  186. Int _Map ::requireAbsIndex(CPtr key) {Int abs_index=getAbsIndex(key); if(abs_index>=0)return abs_index; getFailed(); return -1;}
  187. Int _MapTS::requireAbsIndex(CPtr key) {Int abs_index=getAbsIndex(key); if(abs_index>=0)return abs_index; getFailed(); return -1;}
  188. /******************************************************************************/
  189. Bool _Map ::containsKey(CPtr key)C {return find(key)!=null;}
  190. Bool _MapTS::containsKey(CPtr key)C {return find(key)!=null;}
  191. /******************************************************************************/
  192. Bool _Map::containsData(CPtr data)C
  193. {
  194. C Elm *elm=dataElm(data); if(containsElm(elm))if(!(elmDesc(*elm).flag&MAP_ELM_LOADING))return true;
  195. return false;
  196. }
  197. Bool _MapTS::containsData(CPtr data)C
  198. {
  199. if(data)
  200. {
  201. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  202. SyncLocker locker( _lock);
  203. return super::containsData(data);
  204. }
  205. return false;
  206. }
  207. /******************************************************************************
  208. Int absIndex(C Elm *elm )C {return _memx. absIndex( elm );} // this is NOT thread-safe
  209. Int validIndex(C Elm *elm )C {return _memx.validIndex( elm );} // this is NOT thread-safe
  210. Int absIndex( CPtr data)C {return absIndex(dataElm(data));} // this is NOT thread-safe, assumes that '_data_offset' is zero
  211. Int validIndex( CPtr data)C {return validIndex(dataElm(data));} // this is NOT thread-safe, assumes that '_data_offset' is zero
  212. INLINE Int _Map::absIndex(CPtr data)C // this function assumes that 'data' is not null
  213. {
  214. #if 1 // fast, this can work because '_data_offset' is now zero
  215. DEBUG_ASSERT(_data_offset==0, "Cache data offset should be zero but it isn't");
  216. return _memx.absIndex(dataElm(data));
  217. #else
  218. #if WINDOWS
  219. __try {return _memx.absIndex(dataElm(data));} // use 'absIndex' only when exception control is possible
  220. __except(EXCEPTION_EXECUTE_HANDLER) {return -1;}
  221. #else
  222. return _memx._abs.index(data);
  223. #endif
  224. #endif
  225. }
  226. /******************************************************************************/
  227. CPtr _Map::dataToKey(CPtr data)C
  228. {
  229. C Elm *elm=dataElm(data); if(containsElm(elm))return elmKey(*elm); // key does not change while loading, so ignore MAP_ELM_LOADING
  230. return null;
  231. }
  232. CPtr _MapTS::dataToKey(CPtr data)C
  233. {
  234. if(data)
  235. {
  236. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  237. SyncLocker locker( _lock);
  238. return super::dataToKey(data);
  239. }
  240. return null;
  241. }
  242. /******************************************************************************/
  243. Int _Map::dataToIndex(CPtr data)C
  244. {
  245. C Elm *elm=dataElm(data); if(containsElm(elm)) // only after we know that this element belongs to this container then we can access its key
  246. {
  247. Int i; if(findElm(elmKey(*elm), i))return i;
  248. }
  249. return -1;
  250. }
  251. Int _MapTS::dataToIndex(CPtr data)C
  252. {
  253. if(data)
  254. {
  255. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  256. SyncLocker locker( _lock);
  257. return super::dataToIndex(data);
  258. }
  259. return -1;
  260. }
  261. /******************************************************************************/
  262. void _Map::remove(Int i)
  263. {
  264. if(InRange(i, _elms))
  265. {
  266. Elm *elm=_order[i];
  267. removeFromOrder(i);
  268. _memx.removeData(elm);
  269. }
  270. }
  271. void _MapTS::remove(Int i)
  272. {
  273. if(InRange(i, _elms))
  274. {
  275. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  276. SyncLocker locker( _lock);
  277. super::remove(i);
  278. }
  279. }
  280. void _Map::removeKey(CPtr key)
  281. {
  282. Int i; if(Elm *elm=findElm(key, i))
  283. {
  284. removeFromOrder(i);
  285. _memx.removeData(elm);
  286. }
  287. }
  288. void _MapTS::removeKey(CPtr key)
  289. {
  290. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  291. SyncLocker locker( _lock);
  292. super::removeKey(key);
  293. }
  294. void _Map::removeData(CPtr data)
  295. {
  296. C Elm *elm=dataElm(data); if(containsElm(elm)) // only after we know that this element belongs to this container then we can access its key
  297. {
  298. Int i; if(findElm(elmKey(*elm), i))
  299. {
  300. removeFromOrder(i);
  301. _memx.removeData(elm);
  302. }
  303. }
  304. }
  305. void _MapTS::removeData(CPtr data)
  306. {
  307. if(data)
  308. {
  309. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  310. SyncLocker locker( _lock);
  311. super::removeData(data);
  312. }
  313. }
  314. /******************************************************************************/
// Change the key of an element from 'src_key' to 'dest_key'.
// If both keys exist, their keys (and order positions) are swapped.
// If only one exists, its key is rewritten and its order position moved.
// Returns true if at least one of the keys was found.
Bool _Map::replaceKey(CPtr src_key, CPtr dest_key)
{
Int src_i, dest_i;
Elm * src_elm=findElm( src_key, src_i),
*dest_elm=findElm(dest_key, dest_i);
if(src_elm || dest_elm) // if at least one element was found
{
if(src_elm!=dest_elm) // it's not the same element (this would mean that both keys are the same and we don't need to do anything)
{
if(src_elm && dest_elm) // both exist -> swap keys and order slots
{
   // these assertions are to make sure of the following member order in class: data, key, desc, allowing to calculate the 'key_size'
   typedef Map<Long, Long>::Elm MapElmTest; // needed because macro can't support comma
   ASSERT(OFFSET(MapElmTest, data)==0 // 'data' is first
      && OFFSET(MapElmTest, key )> 0 // 'key' is after 'data'
      && OFFSET(MapElmTest, desc)>OFFSET(MapElmTest, key) // 'desc' is after 'key'
      && SIZE(MapElmTest)==MEMBER_SIZE(MapElmTest, data)+MEMBER_SIZE(MapElmTest, key)+MEMBER_SIZE(MapElmTest, desc)); // only data+key+desc in class
   Int key_size=_desc_offset-_key_offset; // !! Warning: this may include some padding between KEY and DESC, however we're just swapping between valid 'Map.Elm' KEY objects, so it's OK, alternative would be to store '_key_size' in '_Map' but that would increase class size
   Swap(elmKey(*src_elm), elmKey(*dest_elm), key_size); // swap in case 'src_key', 'dest_key' point to 'elmKey'
   Swap(_order[src_i], _order[dest_i]); // adjust order position
}else // only one exists -> rewrite its key and move it to the other key's sorted position
{
   if(src_elm)
   {
      _copy_key(elmKey(*src_elm), dest_key); // adjust key "src_elm.key = dest_key"
      if(dest_i>src_i)dest_i--; // we're moving element from the order to the right, which means that one slot is already occupied by this element
      MoveElm(_order, _elms, src_i, dest_i); // adjust order position
   }else
   if(dest_elm)
   {
      _copy_key(elmKey(*dest_elm), src_key); // adjust key "dest_elm.key = src_key"
      if(src_i>dest_i)src_i--; // we're moving element from the order to the right, which means that one slot is already occupied by this element
      MoveElm(_order, _elms, dest_i, src_i); // adjust order position
   }
}
}
return true;
}
return false;
}
  355. Bool _MapTS::replaceKey(CPtr src_key, CPtr dest_key)
  356. {
  357. SyncUnlocker unlocker(D._lock); // must be used even though we're not using GPU
  358. SyncLocker locker( _lock);
  359. return super::replaceKey(src_key, dest_key);
  360. }
  361. /******************************************************************************/
// Rebuild this map as a structural copy of 'src': same element count, same
// sorted order, same descs and keys. Element DATA is not copied here - the
// templated 'Map' wrapper is responsible for that. Assumes both maps use the
// same element layout ('_key_offset'/'_desc_offset'/'_data_size').
void _Map::from(C _Map &src)
{
del();
_mode=src._mode;
//_compare, _copy_key - keep
#if 0 // modify dest type to match src type (this is not needed)
_key_offset=src. _key_offset;
//_data_offset=src._data_offset;
_desc_offset=src._desc_offset;
_data_size =src._data_size;
_user =src._user;
_create =src._create;
_memx._reset(src._memx.elmSize(), src._memx.blockElms(), src._memx._new, src._memx._del);
#endif
_elms =src._elms;
_memx.setNum(src._memx.elms()); // pre allocate memory for elements
Alloc(_order, _memx.maxElms()); // initialize order exactly the same way as source
REPA(_memx) // setup order exactly the same way as source: the i-th order entry points at the element with the same valid memx index as in 'src'
{
C Elm & src_elm = *src._order[i]; Int memx_index=src._memx.validIndex(&src_elm); DYNAMIC_ASSERT(InRange(memx_index, _memx), "Invalid element index in Map.operator=(C Map &src)"); // get valid index of i-th element in memx container
Elm &dest_elm =_memx[memx_index]; _order[i]=&dest_elm; // set valid index of i-th element
C Desc & src_desc=src.elmDesc( src_elm);
Desc &dest_desc= elmDesc(dest_elm);
dest_desc=src_desc; // copy desc
_copy_key(elmKey(dest_elm), src.elmKey(src_elm)); // copy key (data is copied by the caller)
}
}
  389. /******************************************************************************/
  390. }
  391. /******************************************************************************/