Hashtable.hpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 */
#ifndef ZT_HASHTABLE_HPP
#define ZT_HASHTABLE_HPP

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <new>       // std::bad_alloc, thrown below on allocation failure
#include <stdexcept>
#include <vector>

namespace ZeroTier {
/**
 * A minimal hash table implementation for the ZeroTier core
 *
 * This is not a drop-in replacement for STL containers, and has several
 * limitations. It's designed to be small and fast for use in the
 * ZeroTier core.
 */
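/*
 * Example usage (an illustrative sketch only; the uint64_t/unsigned int key
 * and value types here are arbitrary choices for demonstration):
 *
 *   Hashtable<uint64_t,unsigned int> counts;
 *   counts.set(0x1122334455667788ULL,1);                  // insert or overwrite
 *   counts[0x1122334455667788ULL] += 1;                   // operator[] default-constructs missing values
 *   unsigned int *c = counts.get(0x1122334455667788ULL);  // NULL if the key is absent
 *   if (c)
 *     printf("count: %u\n",*c);
 *   counts.erase(0x1122334455667788ULL);                  // returns true if the key was present
 */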
template<typename K,typename V>
class Hashtable
{
private:
  struct _Bucket
  {
    _Bucket(const K &k,const V &v) : k(k),v(v) {}
    _Bucket(const K &k) : k(k),v() {}
    _Bucket(const _Bucket &b) : k(b.k),v(b.v) {}
    inline _Bucket &operator=(const _Bucket &b) { k = b.k; v = b.v; return *this; }
    K k;
    V v;
    _Bucket *next; // must be set manually for each _Bucket
  };

public:
  /**
   * A simple forward iterator (different from STL)
   *
   * It's safe to erase the key most recently returned by next(), but not any
   * other key. Don't call set() during iteration since that may rehash and
   * invalidate the iterator. Note that erasing a key destroys the targets of
   * the pointers returned by next(). (See the usage sketch after this class.)
   */
  class Iterator
  {
  public:
    /**
     * @param ht Hash table to iterate over
     */
    Iterator(Hashtable &ht) :
      _idx(0),
      _ht(&ht),
      _b(ht._t[0])
    {
    }

    /**
     * @param kptr Pointer to set to point to next key
     * @param vptr Pointer to set to point to next value
     * @return True if kptr and vptr are set, false if no more entries
     */
    inline bool next(K *&kptr,V *&vptr)
    {
      for(;;) {
        if (_b) {
          kptr = &(_b->k);
          vptr = &(_b->v);
          _b = _b->next;
          return true;
        }
        ++_idx;
        if (_idx >= _ht->_bc)
          return false;
        _b = _ht->_t[_idx];
      }
    }

  private:
    unsigned long _idx;
    Hashtable *_ht;
    Hashtable::_Bucket *_b;
  };
  friend class Hashtable::Iterator;
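  /*
   * Iterator usage sketch (illustrative only; assumes a populated
   * Hashtable<uint64_t,unsigned int> named "counts" as in the example above):
   *
   *   Hashtable<uint64_t,unsigned int>::Iterator i(counts);
   *   uint64_t *k = (uint64_t *)0;
   *   unsigned int *v = (unsigned int *)0;
   *   while (i.next(k,v)) {
   *     // *k and *v are valid here; don't call set() while iterating, and
   *     // only erase(*k) for the key most recently returned by next()
   *     printf("%.16llx -> %u\n",(unsigned long long)*k,*v);
   *   }
   */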
  /**
   * @param bc Initial capacity in buckets (default: 128, must be nonzero)
   */
  Hashtable(unsigned long bc = 128) :
    _t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * bc))),
    _bc(bc),
    _s(0)
  {
    if (!_t)
      throw std::bad_alloc();
    for(unsigned long i=0;i<bc;++i)
      _t[i] = (_Bucket *)0;
  }
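  /*
   * Sizing note (illustrative): _grow() below doubles the bucket count
   * whenever the number of entries reaches it, so pre-sizing to roughly the
   * expected number of entries avoids rehashes while filling, e.g.:
   *
   *   Hashtable<uint64_t,unsigned int> big(4096); // expecting ~4000 entries
   */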
  Hashtable(const Hashtable<K,V> &ht) :
    _t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * ht._bc))),
    _bc(ht._bc),
    _s(ht._s)
  {
    if (!_t)
      throw std::bad_alloc();
    for(unsigned long i=0;i<_bc;++i)
      _t[i] = (_Bucket *)0;
    for(unsigned long i=0;i<_bc;++i) {
      const _Bucket *b = ht._t[i];
      while (b) {
        _Bucket *nb = new _Bucket(*b);
        nb->next = _t[i];
        _t[i] = nb;
        b = b->next;
      }
    }
  }

  ~Hashtable()
  {
    this->clear();
    ::free(_t);
  }
  inline Hashtable &operator=(const Hashtable<K,V> &ht)
  {
    if (this == &ht) // guard against self-assignment, which would otherwise empty the table
      return *this;
    this->clear();
    if (ht._s) {
      for(unsigned long i=0;i<ht._bc;++i) {
        const _Bucket *b = ht._t[i];
        while (b) {
          this->set(b->k,b->v);
          b = b->next;
        }
      }
    }
    return *this;
  }
  /**
   * Erase all entries
   */
  inline void clear()
  {
    if (_s) {
      for(unsigned long i=0;i<_bc;++i) {
        _Bucket *b = _t[i];
        while (b) {
          _Bucket *const nb = b->next;
          delete b;
          b = nb;
        }
        _t[i] = (_Bucket *)0;
      }
      _s = 0;
    }
  }

  /**
   * @return Vector of all keys
   */
  inline std::vector<K> keys()
  {
    std::vector<K> k;
    if (_s) {
      for(unsigned long i=0;i<_bc;++i) {
        _Bucket *b = _t[i];
        while (b) {
          k.push_back(b->k);
          b = b->next;
        }
      }
    }
    return k;
  }

  /**
   * @param k Key
   * @return Pointer to value or NULL if not found
   */
  inline V *get(const K &k)
  {
    _Bucket *b = _t[_hc(k) % _bc];
    while (b) {
      if (b->k == k)
        return &(b->v);
      b = b->next;
    }
    return (V *)0;
  }
  inline const V *get(const K &k) const { return const_cast<Hashtable *>(this)->get(k); }

  /**
   * @param k Key
   * @return True if value was present
   */
  inline bool erase(const K &k)
  {
    const unsigned long bidx = _hc(k) % _bc;
    _Bucket *lastb = (_Bucket *)0;
    _Bucket *b = _t[bidx];
    while (b) {
      if (b->k == k) {
        if (lastb)
          lastb->next = b->next;
        else _t[bidx] = b->next;
        delete b;
        --_s;
        return true;
      }
      lastb = b;
      b = b->next;
    }
    return false;
  }
  /**
   * @param k Key
   * @param v Value
   * @return Reference to value in table
   */
  inline V &set(const K &k,const V &v)
  {
    const unsigned long h = _hc(k);
    unsigned long bidx = h % _bc;

    _Bucket *b = _t[bidx];
    while (b) {
      if (b->k == k) {
        b->v = v;
        return b->v;
      }
      b = b->next;
    }

    if (_s >= _bc) {
      _grow();
      bidx = h % _bc;
    }

    b = new _Bucket(k,v);
    b->next = _t[bidx];
    _t[bidx] = b;
    ++_s;
    return b->v;
  }

  /**
   * @param k Key
   * @return Value, possibly newly created
   */
  inline V &operator[](const K &k)
  {
    const unsigned long h = _hc(k);
    unsigned long bidx = h % _bc;

    _Bucket *b = _t[bidx];
    while (b) {
      if (b->k == k)
        return b->v;
      b = b->next;
    }

    if (_s >= _bc) {
      _grow();
      bidx = h % _bc;
    }

    b = new _Bucket(k);
    b->next = _t[bidx];
    _t[bidx] = b;
    ++_s;
    return b->v;
  }

  /**
   * @return Number of entries
   */
  inline unsigned long size() const throw() { return _s; }

  /**
   * @return True if table is empty
   */
  inline bool empty() const throw() { return (_s == 0); }
private:
  template<typename O>
  static inline unsigned long _hc(const O &obj)
  {
    return obj.hashCode();
  }
  static inline unsigned long _hc(const uint64_t i)
  {
    // NOTE: this is fine for network IDs, but might be bad for other kinds
    // of IDs if they are not evenly or randomly distributed.
    return (unsigned long)((i ^ (i >> 32)) * 2654435761ULL);
  }
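  /*
   * Key type requirements (illustrative sketch): a key must either be a
   * uint64_t (handled by the overload above) or expose a const hashCode()
   * method returning an unsigned long, and it must support operator==. A
   * hypothetical custom key could look like:
   *
   *   struct MyKey
   *   {
   *     unsigned long a,b;
   *     inline unsigned long hashCode() const { return (a ^ b); }
   *     inline bool operator==(const MyKey &k) const { return ((a == k.a)&&(b == k.b)); }
   *   };
   *   // Hashtable<MyKey,int> then hashes through MyKey::hashCode()
   */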
  inline void _grow()
  {
    const unsigned long nc = _bc * 2;
    _Bucket **nt = reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * nc));
    if (nt) {
      for(unsigned long i=0;i<nc;++i)
        nt[i] = (_Bucket *)0;
      for(unsigned long i=0;i<_bc;++i) {
        _Bucket *b = _t[i];
        while (b) {
          _Bucket *const nb = b->next;
          const unsigned long nidx = _hc(b->k) % nc;
          b->next = nt[nidx];
          nt[nidx] = b;
          b = nb;
        }
      }
      ::free(_t);
      _t = nt;
      _bc = nc;
    }
  }

  _Bucket **_t;
  unsigned long _bc;
  unsigned long _s;
};

} // namespace ZeroTier

#endif