// File: lzham_vector.h
// See Copyright Notice and license at the end of include/lzham.h
#pragma once
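
// Note: this header is not standalone - it assumes the lzham core headers
// (helpers::, scalar_type<>, lzham_free(), and the LZHAM_* macros) have
// already been included, e.g. via lzham_core.h.
//
// Rough usage sketch (illustrative only). Allocation can fail, so the try_*
// methods return false instead of throwing:
//
//    lzham::vector<uint> v;
//    if (!v.try_resize(16))
//       return false;           // out of memory
//    v[0] = 5;
//    if (!v.try_push_back(7))
//       return false;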
namespace lzham
{
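   // elemental_vector holds the type-erased guts of vector<T>. vector<T> has an
   // identical leading memory layout and reinterpret_casts itself to
   // elemental_vector, so the out-of-line growth logic in increase_capacity()
   // is compiled once rather than once per template instantiation.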
   struct elemental_vector
   {
      void* m_p;
      uint m_size;
      uint m_capacity;

      typedef void (*object_mover)(void* pDst, void* pSrc, uint num);

      bool increase_capacity(uint min_new_capacity, bool grow_hint, uint element_size, object_mover pRelocate, bool nofail);
   };
   template<typename T>
   class vector : public helpers::rel_ops< vector<T> >
   {
   public:
      typedef T* iterator;
      typedef const T* const_iterator;
      typedef T value_type;
      typedef T& reference;
      typedef const T& const_reference;
      typedef T* pointer;
      typedef const T* const_pointer;

      inline vector() :
         m_p(NULL),
         m_size(0),
         m_capacity(0)
      {
      }

      inline vector(uint n, const T& init) :
         m_p(NULL),
         m_size(0),
         m_capacity(0)
      {
         increase_capacity(n, false);
         helpers::construct_array(m_p, n, init);
         m_size = n;
      }

      inline vector(const vector& other) :
         m_p(NULL),
         m_size(0),
         m_capacity(0)
      {
         increase_capacity(other.m_size, false);

         m_size = other.m_size;

         if (LZHAM_IS_BITWISE_COPYABLE(T))
            memcpy(m_p, other.m_p, m_size * sizeof(T));
         else
         {
            T* pDst = m_p;
            const T* pSrc = other.m_p;
            for (uint i = m_size; i > 0; i--)
               helpers::construct(pDst++, *pSrc++);
         }
      }

      inline explicit vector(uint size) :
         m_p(NULL),
         m_size(0),
         m_capacity(0)
      {
         try_resize(size);
      }

      inline ~vector()
      {
         if (m_p)
         {
            scalar_type<T>::destruct_array(m_p, m_size);
            lzham_free(m_p);
         }
      }

      inline vector& operator= (const vector& other)
      {
         if (this == &other)
            return *this;

         if (m_capacity >= other.m_size)
            try_resize(0);
         else
         {
            clear();
            if (!increase_capacity(other.m_size, false))
            {
               LZHAM_FAIL("lzham::vector operator=: Out of memory!");
               return *this;
            }
         }

         if (LZHAM_IS_BITWISE_COPYABLE(T))
            memcpy(m_p, other.m_p, other.m_size * sizeof(T));
         else
         {
            T* pDst = m_p;
            const T* pSrc = other.m_p;
            for (uint i = other.m_size; i > 0; i--)
               helpers::construct(pDst++, *pSrc++);
         }

         m_size = other.m_size;

         return *this;
      }

      inline const T* begin() const { return m_p; }
      inline T* begin() { return m_p; }

      inline const T* end() const { return m_p + m_size; }
      inline T* end() { return m_p + m_size; }

      inline bool empty() const { return !m_size; }
      inline uint size() const { return m_size; }
      inline uint size_in_bytes() const { return m_size * sizeof(T); }
      inline uint capacity() const { return m_capacity; }

      // operator[] will assert on out of range indices, but in final builds there is (and will never be) any range checking on this method.
      inline const T& operator[] (uint i) const { LZHAM_ASSERT(i < m_size); return m_p[i]; }
      inline T& operator[] (uint i) { LZHAM_ASSERT(i < m_size); return m_p[i]; }

      // at() always includes range checking, even in final builds, unlike operator [].
      // The first element is returned if the index is out of range.
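      // For example, on a 3-element vector, at(100) asserts in debug builds and
      // returns element 0 in release builds.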
      inline const T& at(uint i) const { LZHAM_ASSERT(i < m_size); return (i >= m_size) ? m_p[0] : m_p[i]; }
      inline T& at(uint i) { LZHAM_ASSERT(i < m_size); return (i >= m_size) ? m_p[0] : m_p[i]; }

      inline const T& front() const { LZHAM_ASSERT(m_size); return m_p[0]; }
      inline T& front() { LZHAM_ASSERT(m_size); return m_p[0]; }

      inline const T& back() const { LZHAM_ASSERT(m_size); return m_p[m_size - 1]; }
      inline T& back() { LZHAM_ASSERT(m_size); return m_p[m_size - 1]; }

      inline const T* get_ptr() const { return m_p; }
      inline T* get_ptr() { return m_p; }

      inline void clear()
      {
         if (m_p)
         {
            scalar_type<T>::destruct_array(m_p, m_size);
            lzham_free(m_p);
            m_p = NULL;
            m_size = 0;
            m_capacity = 0;
         }
      }

      inline void clear_no_destruction()
      {
         if (m_p)
         {
            lzham_free(m_p);
            m_p = NULL;
            m_size = 0;
            m_capacity = 0;
         }
      }
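      // The try_*() methods below report allocation failure by returning false
      // (or NULL) instead of throwing; callers are expected to check the result.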
      inline bool try_reserve(uint new_capacity)
      {
         return increase_capacity(new_capacity, true, true);
      }

      inline bool try_resize(uint new_size, bool grow_hint = false)
      {
         if (m_size != new_size)
         {
            if (new_size < m_size)
               scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
            else
            {
               if (new_size > m_capacity)
               {
                  if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
                     return false;
               }

               scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
            }

            m_size = new_size;
         }

         return true;
      }

      inline bool try_resize_no_construct(uint new_size, bool grow_hint = false)
      {
         if (new_size > m_capacity)
         {
            if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
               return false;
         }

         m_size = new_size;

         return true;
      }

      inline T* try_enlarge(uint i)
      {
         uint cur_size = m_size;
         if (!try_resize(cur_size + i, true))
            return NULL;
         return get_ptr() + cur_size;
      }

      inline bool try_push_back(const T& obj)
      {
         LZHAM_ASSERT(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));

         if (m_size >= m_capacity)
         {
            if (!increase_capacity(m_size + 1, true, true))
               return false;
         }

         scalar_type<T>::construct(m_p + m_size, obj);
         m_size++;

         return true;
      }

      inline void pop_back()
      {
         LZHAM_ASSERT(m_size);

         if (m_size)
         {
            m_size--;
            scalar_type<T>::destruct(&m_p[m_size]);
         }
      }

      inline bool insert(uint index, const T* p, uint n)
      {
         LZHAM_ASSERT(index <= m_size);
         if (!n)
            return true;

         const uint orig_size = m_size;
         if (!try_resize(m_size + n, true))
            return false;

         const uint num_to_move = orig_size - index;
         if (num_to_move)
         {
            if (LZHAM_IS_BITWISE_COPYABLE(T))
               memmove(m_p + index + n, m_p + index, sizeof(T) * num_to_move);
            else
            {
               const T* pSrc = m_p + orig_size - 1;
               T* pDst = const_cast<T*>(pSrc) + n;

               for (uint i = 0; i < num_to_move; i++)
               {
                  LZHAM_ASSERT((pDst - m_p) < (int)m_size);
                  *pDst-- = *pSrc--;
               }
            }
         }

         T* pDst = m_p + index;

         if (LZHAM_IS_BITWISE_COPYABLE(T))
            memcpy(pDst, p, sizeof(T) * n);
         else
         {
            for (uint i = 0; i < n; i++)
            {
               LZHAM_ASSERT((pDst - m_p) < (int)m_size);
               *pDst++ = *p++;
            }
         }

         return true;
      }

      // push_front() isn't going to be very fast - it's only here for usability.
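      // (Each call shifts the entire existing contents one slot to the right via insert().)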
      inline bool try_push_front(const T& obj)
      {
         return insert(0, &obj, 1);
      }

      bool append(const vector& other)
      {
         if (other.m_size)
            return insert(m_size, &other[0], other.m_size);
         return true;
      }

      bool append(const T* p, uint n)
      {
         if (n)
            return insert(m_size, p, n);
         return true;
      }

      inline void erase(uint start, uint n)
      {
         LZHAM_ASSERT((start + n) <= m_size);
         if ((start + n) > m_size)
            return;

         if (!n)
            return;

         const uint num_to_move = m_size - (start + n);

         T* pDst = m_p + start;
         const T* pSrc = m_p + start + n;

         if (LZHAM_IS_BITWISE_COPYABLE(T))
            memmove(pDst, pSrc, num_to_move * sizeof(T));
         else
         {
            T* pDst_end = pDst + num_to_move;

            while (pDst != pDst_end)
               *pDst++ = *pSrc++;

            scalar_type<T>::destruct_array(pDst_end, n);
         }

         m_size -= n;
      }

      inline void erase(uint index)
      {
         erase(index, 1);
      }

      inline void erase(T* p)
      {
         LZHAM_ASSERT((p >= m_p) && (p < (m_p + m_size)));
         erase(static_cast<uint>(p - m_p));
      }

      void erase_unordered(uint index)
      {
         LZHAM_ASSERT(index < m_size);

         if ((index + 1) < m_size)
            (*this)[index] = back();

         pop_back();
      }

      inline bool operator== (const vector& rhs) const
      {
         if (m_size != rhs.m_size)
            return false;
         else if (m_size)
         {
            if (scalar_type<T>::cFlag)
               return memcmp(m_p, rhs.m_p, sizeof(T) * m_size) == 0;
            else
            {
               const T* pSrc = m_p;
               const T* pDst = rhs.m_p;
               for (uint i = m_size; i; i--)
                  if (!(*pSrc++ == *pDst++))
                     return false;
            }
         }

         return true;
      }

      inline bool operator< (const vector& rhs) const
      {
         const uint min_size = math::minimum(m_size, rhs.m_size);

         const T* pSrc = m_p;
         const T* pSrc_end = m_p + min_size;
         const T* pDst = rhs.m_p;

         while ((pSrc < pSrc_end) && (*pSrc == *pDst))
         {
            pSrc++;
            pDst++;
         }

         if (pSrc < pSrc_end)
            return *pSrc < *pDst;

         return m_size < rhs.m_size;
      }

      inline void swap(vector& other)
      {
         utils::swap(m_p, other.m_p);
         utils::swap(m_size, other.m_size);
         utils::swap(m_capacity, other.m_capacity);
      }

      inline void sort()
      {
         std::sort(begin(), end());
      }

      inline void unique()
      {
         if (!empty())
         {
            sort();
            // This class has no plain resize(); use try_resize(), which cannot
            // fail when shrinking, so the result can be safely ignored here.
            try_resize(static_cast<uint>(std::unique(begin(), end()) - begin()));
         }
      }

      inline void reverse()
      {
         uint j = m_size >> 1;
         for (uint i = 0; i < j; i++)
            utils::swap(m_p[i], m_p[m_size - 1 - i]);
      }

      inline int find(const T& key) const
      {
         const T* p = m_p;
         const T* p_end = m_p + m_size;

         uint index = 0;

         while (p != p_end)
         {
            if (key == *p)
               return index;

            p++;
            index++;
         }

         return cInvalidIndex;
      }

      inline int find_sorted(const T& key) const
      {
         if (m_size)
         {
            // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
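            // Branchless step: after m >>= 1, cmp is 0 (key >= probe) or -1
            // (key < probe) following the cmp = -cmp negation. Since
            // (x ^ -1) - (-1) == -x, the update i += (((m + 1) >> 1) ^ cmp) - cmp
            // moves i by +ceil(m/2) or -ceil(m/2) without branching.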
            int i = ((m_size + 1) >> 1) - 1;
            int m = m_size;

            for ( ; ; )
            {
               LZHAM_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
               const T* pKey_i = m_p + i;
               int cmp = key < *pKey_i;
               if ((!cmp) && (key == *pKey_i)) return i;
               m >>= 1;
               if (!m) break;
               cmp = -cmp;
               i += (((m + 1) >> 1) ^ cmp) - cmp;

               LZHAM_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
               pKey_i = m_p + i;
               cmp = key < *pKey_i;
               if ((!cmp) && (key == *pKey_i)) return i;
               m >>= 1;
               if (!m) break;
               cmp = -cmp;
               i += (((m + 1) >> 1) ^ cmp) - cmp;
            }
         }

         return cInvalidIndex;
      }

      template<typename Q>
      inline int find_sorted(const T& key, Q less_than) const
      {
         if (m_size)
         {
            // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
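            // Same traversal as find_sorted() above, but ordering comes from the
            // caller-supplied less_than predicate; equality is inferred as
            // !less_than(key, x) && !less_than(x, key).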
            int i = ((m_size + 1) >> 1) - 1;
            int m = m_size;

            for ( ; ; )
            {
               LZHAM_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
               const T* pKey_i = m_p + i;
               int cmp = less_than(key, *pKey_i);
               if ((!cmp) && (!less_than(*pKey_i, key))) return i;
               m >>= 1;
               if (!m) break;
               cmp = -cmp;
               i += (((m + 1) >> 1) ^ cmp) - cmp;

               LZHAM_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
               pKey_i = m_p + i;
               cmp = less_than(key, *pKey_i);
               if ((!cmp) && (!less_than(*pKey_i, key))) return i;
               m >>= 1;
               if (!m) break;
               cmp = -cmp;
               i += (((m + 1) >> 1) ^ cmp) - cmp;
            }
         }

         return cInvalidIndex;
      }

      inline uint count_occurences(const T& key) const
      {
         uint c = 0;

         const T* p = m_p;
         const T* p_end = m_p + m_size;

         while (p != p_end)
         {
            if (key == *p)
               c++;

            p++;
         }

         return c;
      }

      inline void set_all(const T& o)
      {
         if ((sizeof(T) == 1) && (scalar_type<T>::cFlag))
            memset(m_p, *reinterpret_cast<const uint8*>(&o), m_size);
         else
         {
            T* pDst = m_p;
            T* pDst_end = pDst + m_size;
            while (pDst != pDst_end)
               *pDst++ = o;
         }
      }

   private:
      T* m_p;
      uint m_size;
      uint m_capacity;
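      // Nested vectors are safe to relocate with a raw memcpy (only the outer
      // object's pointer/size/capacity move), so is_vector lets increase_capacity()
      // skip the per-element object_mover for vector-of-vector instantiations.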
      template<typename Q> struct is_vector { enum { cFlag = false }; };
      template<typename Q> struct is_vector< vector<Q> > { enum { cFlag = true }; };
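      // Relocation callback for non-bitwise-movable types: copy-construct each
      // element into the new block, then destruct the original.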
      static void object_mover(void* pDst_void, void* pSrc_void, uint num)
      {
         T* pSrc = static_cast<T*>(pSrc_void);
         T* const pSrc_end = pSrc + num;

         T* pDst = static_cast<T*>(pDst_void);

         while (pSrc != pSrc_end)
         {
            new (static_cast<void*>(pDst)) T(*pSrc);
            pSrc->~T();
            pSrc++;
            pDst++;
         }
      }
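      // Forwards to elemental_vector::increase_capacity(). The cast relies on
      // vector<T> and elemental_vector sharing the same leading
      // m_p/m_size/m_capacity layout; bitwise-movable element types pass a NULL
      // mover so the shared code can relocate them with a plain memcpy.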
      inline bool increase_capacity(uint min_new_capacity, bool grow_hint, bool nofail = false)
      {
         return reinterpret_cast<elemental_vector*>(this)->increase_capacity(
            min_new_capacity, grow_hint, sizeof(T),
            (LZHAM_IS_BITWISE_MOVABLE(T) || (is_vector<T>::cFlag)) ? NULL : object_mover, nofail);
      }
   };
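   // A vector's heap storage is reached only through its m_p pointer, so whole
   // vector objects can themselves be relocated with memcpy.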
   template<typename T> struct bitwise_movable< vector<T> > { enum { cFlag = true }; };

   extern void vector_test();

   template<typename T>
   inline void swap(vector<T>& a, vector<T>& b)
   {
      a.swap(b);
   }

} // namespace lzham