Buf.hpp

/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#ifndef ZT_BUF_HPP
#define ZT_BUF_HPP

#include "Constants.hpp"
#include "Utils.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
#include "TriviallyCopyable.hpp"
#include "FCV.hpp"

#include <atomic>
#include <cstdint>
#include <stdexcept>
#include <utility>
#include <algorithm>
#include <new>

// Buffers are 16384 bytes in size because this is the smallest size that can hold any packet
// and is a power of two. It needs to be a power of two because masking is significantly faster
// than integer division modulus.
#define ZT_BUF_MEM_SIZE 0x00004000
#define ZT_BUF_MEM_MASK 0x00003fffU
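
// Editorial note: because ZT_BUF_MEM_SIZE is a power of two, wrapping an index needs
// only a bitwise AND with the mask rather than a modulus. For example, for a
// non-negative index:
//
//   (20000 & ZT_BUF_MEM_MASK) == (20000 % ZT_BUF_MEM_SIZE) == 3616
//
// This is the masking referred to in the comment above.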
// Sanity limit on maximum buffer pool size
#define ZT_BUF_MAX_POOL_SIZE 1024

namespace ZeroTier {

/**
 * Buffer and methods for branch-free bounds-checked data assembly and parsing
 *
 * This implements an extremely fast buffer for packet assembly and parsing that avoids
 * branching whenever possible. To be safe it must be used correctly!
 *
 * The read methods are prefixed with 'r' and the write methods with 'w'. These methods
 * take an iterator, which is just an int that should be initialized to 0 (or whatever
 * starting position is desired). All read methods will advance the iterator regardless
 * of outcome.
 *
 * Read and write methods fail silently in the event of overflow. They do not corrupt or
 * access memory outside the bounds of Buf, but will otherwise produce undefined results.
 *
 * IT IS THE RESPONSIBILITY OF THE USER of this class to use the readOverflow() and
 * writeOverflow() static methods to check the iterator for overflow after each series
 * of reads and writes and BEFORE ANY PARSING or other decisions are made on the basis
 * of the data obtained from a buffer. Failure to do so can result in bugs due
 * to parsing and branching on undefined or corrupt data.
 *
 * ^^ THIS IS VERY IMPORTANT ^^
 *
 * A typical packet assembly consists of repeated calls to the write methods followed by
 * a check to writeOverflow() before final packet armoring and transport. A typical packet
 * disassembly and parsing consists of a series of read calls to obtain the packet's
 * fields followed by a call to readOverflow() to check that these fields are valid. The
 * packet is discarded if readOverflow() returns true. Some packet parsers may make
 * additional reads and in this case readOverflow() must be checked after each set of
 * reads to ensure that overflow did not occur. (A brief usage sketch appears in a
 * comment following readOverflow() below.)
 *
 * Buf uses a lock-free pool for extremely fast allocation and deallocation.
 */
class Buf
{
    friend class SharedPtr<Buf>;

public:
    // New and delete operators that allocate Buf instances from a shared lock-free memory pool.
    static void *operator new(std::size_t sz);
    static void operator delete(void *ptr);

    /**
     * Raw data held in buffer
     *
     * The additional eight bytes should not be used and should be considered undefined.
     * They exist to allow reads and writes of integer types to silently overflow if a
     * read or write is performed at the end of the buffer.
     */
    uint8_t unsafeData[ZT_BUF_MEM_SIZE + 8];

    /**
     * Free all instances of Buf in shared pool.
     *
     * New buffers will be created and the pool repopulated as buffers are allocated,
     * and outstanding buffers will still be returned to the pool. This just frees
     * buffers currently held in reserve.
     */
    static void freePool() noexcept;

    /**
     * @return Number of Buf objects currently allocated via pool mechanism
     */
    static long poolAllocated() noexcept;

    /**
     * Slice is almost exactly like the built-in slice data structure in Go
     */
    struct Slice : TriviallyCopyable
    {
        ZT_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) noexcept : b(b_),s(s_),e(e_) {}
        ZT_INLINE Slice() noexcept : b(),s(0),e(0) {}

        ZT_INLINE operator bool() const noexcept { return (b); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
        ZT_INLINE unsigned int size() const noexcept { return (e - s); }
        ZT_INLINE void zero() noexcept { b.zero(); s = 0; e = 0; }

        /**
         * Buffer holding slice data
         */
        SharedPtr<Buf> b;

        /**
         * Index of start of data in slice
         */
        unsigned int s;

        /**
         * Index of end of data in slice (make sure it's greater than or equal to 's'!)
         */
        unsigned int e;
    };
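
    /*
     * Editorial illustration (not part of the original header): a Slice refers to the
     * half-open byte range [s,e) of the buffer it holds, so for a hypothetical
     * SharedPtr<Buf> 'b':
     *
     *   Buf::Slice sl(b,10,30);   // bytes 10..29 of b
     *   sl.size();                // == 20
     *
     * which is analogous to b[10:30] on a Go slice.
     */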
    /**
     * A vector of slices making up a packet that might span more than one buffer.
     */
    class PacketVector : public ZeroTier::FCV<Slice,ZT_MAX_PACKET_FRAGMENTS>
    {
    public:
        ZT_INLINE PacketVector() : ZeroTier::FCV<Slice,ZT_MAX_PACKET_FRAGMENTS>() {}

        ZT_INLINE unsigned int totalSize() const noexcept
        {
            unsigned int size = 0;
            for(PacketVector::const_iterator s(begin());s!=end();++s)
                size += s->e - s->s;
            return size;
        }

        /**
         * Merge this packet vector into a single destination buffer
         *
         * @param b Destination buffer
         * @return Size of data in destination or -1 on error
         */
        ZT_INLINE int mergeCopy(Buf &b) const noexcept
        {
            unsigned int size = 0;
            for(PacketVector::const_iterator s(begin());s!=end();++s) {
                const unsigned int start = s->s;
                const unsigned int rem = s->e - start;
                if (likely((size + rem) <= ZT_BUF_MEM_SIZE)) {
                    Utils::copy(b.unsafeData + size,s->b->unsafeData + start,rem);
                    size += rem;
                } else {
                    return -1;
                }
            }
            return (int)size;
        }

        /**
         * Merge this packet vector into a single destination buffer with an arbitrary copy function
         *
         * This can be used to e.g. simultaneously merge and decrypt a packet.
         *
         * @param b Destination buffer
         * @param simpleCopyBefore Don't start using copyFunction until this index (0 to always use)
         * @param copyFunction Function to invoke with memcpy-like arguments: (dest, source, size)
         * @tparam F Type of copyFunction (typically inferred)
         * @return Size of data in destination or -1 on error
         */
        template<typename F>
        ZT_INLINE int mergeMap(Buf &b,const unsigned int simpleCopyBefore,F copyFunction) const noexcept
        {
            unsigned int size = 0;
            for(PacketVector::const_iterator s(begin());s!=end();++s) {
                unsigned int start = s->s;
                unsigned int rem = s->e - start;
                if (likely((size + rem) <= ZT_BUF_MEM_SIZE)) {
                    if (size < simpleCopyBefore) {
                        unsigned int sc = simpleCopyBefore - size;
                        if (unlikely(sc > rem))
                            sc = rem;
                        Utils::copy(b.unsafeData + size,s->b->unsafeData + start,sc);
                        start += sc;
                        rem -= sc;
                        size += sc;
                    }
                    if (likely(rem > 0)) {
                        copyFunction(b.unsafeData + size,s->b->unsafeData + start,rem);
                        size += rem;
                    }
                } else {
                    return -1;
                }
            }
            return (int)size;
        }
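
        /*
         * Editorial illustration (not part of the original header): mergeMap() can merge
         * and transform in one pass. Here a hypothetical XOR "cipher" stands in for a
         * real decrypt step, and the first 16 bytes (e.g. an unencrypted header) are
         * copied verbatim via simpleCopyBefore. 'pv' is a hypothetical PacketVector.
         *
         *   Buf assembled;
         *   const int size = pv.mergeMap(assembled,16,
         *       [](uint8_t *dst,const uint8_t *src,unsigned int len) noexcept {
         *           for(unsigned int i=0;i<len;++i)
         *               dst[i] = src[i] ^ 0x55;
         *       });
         *   if (size < 0)
         *       return; // vector too large to merge: discard
         */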
    };

    /**
     * Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
     */
    ZT_INLINE Buf() noexcept : __nextInPool(0),__refCount(0) {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)

    /**
     * Create a new buffer and copy data into it
     */
    ZT_INLINE Buf(const void *const data,const unsigned int len) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
        __nextInPool(0),
        __refCount(0)
    {
        Utils::copy(unsafeData,data,len);
    }

    ZT_INLINE Buf(const Buf &b2) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
        __nextInPool(0),
        __refCount(0)
    {
        Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData,b2.unsafeData);
    }

    ZT_INLINE Buf &operator=(const Buf &b2) noexcept
    {
        if (this != &b2)
            Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData,b2.unsafeData);
        return *this;
    }
    /**
     * Check for overflow beyond the size of the buffer
     *
     * This is used to check for overflow when writing. It returns true if the iterator
     * has passed beyond the capacity of the buffer.
     *
     * @param ii Iterator to check
     * @return True if iterator has written past the capacity of the buffer
     */
    static ZT_INLINE bool writeOverflow(const int &ii) noexcept { return ((ii - ZT_BUF_MEM_SIZE) > 0); }
    /**
     * Check for overflow beyond the size of the data that should be in the buffer
     *
     * This is used to check for overflow when reading, with the second argument being the
     * size of the meaningful data actually present in the buffer.
     *
     * @param ii Iterator to check
     * @param size Size of data that should be in buffer
     * @return True if iterator has read past the size of the data
     */
    static ZT_INLINE bool readOverflow(const int &ii,const unsigned int size) noexcept { return ((ii - (int)size) > 0); }
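
    /*
     * Usage sketch (editorial illustration, not part of the original header). The names
     * 'out', 'payload', and 'payloadLen' and the field values are hypothetical; the
     * pattern shown is the write-then-writeOverflow() and read-then-readOverflow()
     * discipline described in the class comment above.
     *
     *   SharedPtr<Buf> out(new Buf());      // allocated from the lock-free pool
     *   int wi = 0;
     *   out->wI8(wi,0x01);                  // hypothetical verb/type byte
     *   out->wI32(wi,0x12345678);           // hypothetical packet ID
     *   out->wB(wi,payload,payloadLen);     // hypothetical payload bytes
     *   if (Buf::writeOverflow(wi))
     *       return;                         // too large: discard before armoring/sending
     *
     *   int ri = 0;
     *   const uint8_t verb = out->rI8(ri);
     *   const uint32_t id = out->rI32(ri);
     *   if (Buf::readOverflow(ri,(unsigned int)wi))
     *       return;                         // truncated or malformed: discard
     */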
    /**
     * Set all memory to zero
     */
    ZT_INLINE void clear() noexcept
    {
        Utils::zero<ZT_BUF_MEM_SIZE>(unsafeData);
    }

    /**
     * Read a byte
     *
     * @param ii Index value-result parameter (incremented by 1)
     * @return Byte (undefined on overflow)
     */
    ZT_INLINE uint8_t rI8(int &ii) const noexcept
    {
        const int s = ii++;
        return unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK];
    }

    /**
     * Read a 16-bit integer
     *
     * @param ii Index value-result parameter (incremented by 2)
     * @return Integer (undefined on overflow)
     */
    ZT_INLINE uint16_t rI16(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[s] << 8U) |
            (uint16_t)unsafeData[s + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
#endif
    }

    /**
     * Read a 32-bit integer
     *
     * @param ii Index value-result parameter (incremented by 4)
     * @return Integer (undefined on overflow)
     */
    ZT_INLINE uint32_t rI32(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[s] << 24U) |
            ((uint32_t)unsafeData[s + 1] << 16U) |
            ((uint32_t)unsafeData[s + 2] << 8U) |
            (uint32_t)unsafeData[s + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
#endif
    }

    /**
     * Read a 64-bit integer
     *
     * @param ii Index value-result parameter (incremented by 8)
     * @return Integer (undefined on overflow)
     */
    ZT_INLINE uint64_t rI64(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[s] << 56U) |
            ((uint64_t)unsafeData[s + 1] << 48U) |
            ((uint64_t)unsafeData[s + 2] << 40U) |
            ((uint64_t)unsafeData[s + 3] << 32U) |
            ((uint64_t)unsafeData[s + 4] << 24U) |
            ((uint64_t)unsafeData[s + 5] << 16U) |
            ((uint64_t)unsafeData[s + 6] << 8U) |
            (uint64_t)unsafeData[s + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
    }
    /**
     * Read an object supporting the marshal/unmarshal interface
     *
     * If the return value is negative the object's state is undefined. A return value of
     * zero typically also indicates a problem, though this may depend on the object type.
     *
     * Since objects may be invalid even if there is no overflow, it's important to check
     * the return value of this function in all cases and discard invalid packets as it
     * indicates.
     *
     * @tparam T Object type
     * @param ii Index value-result parameter (incremented by object's size in bytes)
     * @param obj Object to read
     * @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
     */
    template<typename T>
    ZT_INLINE int rO(int &ii,T &obj) const noexcept
    {
        if (likely(ii < ZT_BUF_MEM_SIZE)) {
            int ms = obj.unmarshal(unsafeData + ii,ZT_BUF_MEM_SIZE - ii);
            if (ms > 0)
                ii += ms;
            return ms;
        }
        return -1;
    }
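
    /*
     * Editorial illustration (not part of the original header): rO() and wO() expect a
     * type implementing the marshal interface used by the calls above; 'Thing' and 'buf'
     * are hypothetical names. Roughly:
     *
     *   struct Thing
     *   {
     *       static constexpr int marshalSizeMax() noexcept { return 8; }
     *       int marshal(uint8_t *data) const noexcept;           // bytes written, or -1 on error
     *       int unmarshal(const uint8_t *data,int len) noexcept; // bytes read, or -1 on error
     *   };
     *
     *   Thing t;
     *   int ii = 0;
     *   if (buf.rO(ii,t) < 0)
     *       return; // unmarshal error or overflow: discard (zero may also be invalid for some types)
     */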
    /**
     * Read a C-style string from the buffer, making a copy and advancing the iterator
     *
     * Use this if the buffer's memory may get changed between reading and processing
     * what is read.
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @param buf Buffer to receive string
     * @param bufSize Capacity of buffer in bytes
     * @return Pointer to buf or NULL on overflow or error
     */
    ZT_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const noexcept
    {
        const char *const s = (const char *)(unsafeData + ii);
        const int sii = ii;
        while (ii < ZT_BUF_MEM_SIZE) {
            if (unsafeData[ii++] == 0) {
                const int l = ii - sii;
                if (unlikely((unsigned int)l > bufSize))
                    return nullptr;
                Utils::copy(buf,s,l);
                return buf;
            }
        }
        return nullptr;
    }

    /**
     * Obtain a pointer to a C-style string in the buffer without copying and advance the iterator
     *
     * The iterator is advanced even if this fails and returns NULL so that readOverflow()
     * will indicate that an overflow occurred. As with other reads the string's contents are
     * undefined if readOverflow() returns true.
     *
     * This version avoids a copy and so is faster if the buffer won't be modified between
     * reading and processing.
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @return Pointer to null-terminated C-style string or NULL on overflow or error
     */
    ZT_INLINE const char *rSnc(int &ii) const noexcept
    {
        const char *const s = (const char *)(unsafeData + ii);
        while (ii < ZT_BUF_MEM_SIZE) {
            if (unsafeData[ii++] == 0)
                return s;
        }
        return nullptr;
    }

    /**
     * Read a byte array from the buffer, making a copy and advancing the iterator
     *
     * Use this if the buffer's memory may get changed between reading and processing
     * what is read.
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param bytes Buffer to contain data to read
     * @param len Length of buffer
     * @return Pointer to data or NULL on overflow or error
     */
    ZT_INLINE uint8_t *rB(int &ii,void *const bytes,const unsigned int len) const noexcept
    {
        const int s = ii;
        if (likely((ii += (int)len) <= ZT_BUF_MEM_SIZE)) {
            Utils::copy(bytes,unsafeData + s,len);
            return reinterpret_cast<uint8_t *>(bytes);
        }
        return nullptr;
    }
    /**
     * Obtain a pointer to a field in the buffer without copying and advance the iterator
     *
     * The iterator is advanced even if this fails and returns NULL so that readOverflow()
     * will indicate that an overflow occurred.
     *
     * This version avoids a copy and so is faster if the buffer won't be modified between
     * reading and processing.
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param len Length of data field to obtain a pointer to
     * @return Pointer to field or NULL on overflow
     */
    ZT_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const noexcept
    {
        const uint8_t *const b = unsafeData + ii;
        return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
    }

    /**
     * Load a value at an index that is compile time checked against the maximum buffer size
     *
     * @tparam I Static index
     * @return Value
     */
    template<unsigned int I>
    ZT_INLINE uint8_t lI8() const noexcept
    {
        static_assert(I < ZT_BUF_MEM_SIZE,"overflow");
        return unsafeData[I];
    }
    /**
     * Load a value at an index that is compile time checked against the maximum buffer size
     *
     * @tparam I Static index
     * @return Value
     */
    template<unsigned int I>
    ZT_INLINE uint16_t lI16() const noexcept
    {
        static_assert((I + 1) < ZT_BUF_MEM_SIZE,"overflow");
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[I] << 8U) |
            (uint16_t)unsafeData[I + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + I));
#endif
    }

    /**
     * Load a value at an index that is compile time checked against the maximum buffer size
     *
     * @tparam I Static index
     * @return Value
     */
    template<unsigned int I>
    ZT_INLINE uint32_t lI32() const noexcept
    {
        static_assert((I + 3) < ZT_BUF_MEM_SIZE,"overflow");
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[I] << 24U) |
            ((uint32_t)unsafeData[I + 1] << 16U) |
            ((uint32_t)unsafeData[I + 2] << 8U) |
            (uint32_t)unsafeData[I + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + I));
#endif
    }

    /**
     * Load a value at an index that is compile time checked against the maximum buffer size
     *
     * @tparam I Static index
     * @return Value
     */
    template<unsigned int I>
    ZT_INLINE uint64_t lI64() const noexcept
    {
        static_assert((I + 7) < ZT_BUF_MEM_SIZE,"overflow");
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[I] << 56U) |
            ((uint64_t)unsafeData[I + 1] << 48U) |
            ((uint64_t)unsafeData[I + 2] << 40U) |
            ((uint64_t)unsafeData[I + 3] << 32U) |
            ((uint64_t)unsafeData[I + 4] << 24U) |
            ((uint64_t)unsafeData[I + 5] << 16U) |
            ((uint64_t)unsafeData[I + 6] << 8U) |
            (uint64_t)unsafeData[I + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + I));
#endif
    }
    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to the same soft bounds masking as the rI??() methods.
     */
    ZT_INLINE uint8_t lI8(const int ii) const noexcept
    {
        return unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK];
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to the same soft bounds masking as the rI??() methods.
     */
    ZT_INLINE uint16_t lI16(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[s] << 8U) |
            (uint16_t)unsafeData[s + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
#endif
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to the same soft bounds masking as the rI??() methods.
     */
    ZT_INLINE uint32_t lI32(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[s] << 24U) |
            ((uint32_t)unsafeData[s + 1] << 16U) |
            ((uint32_t)unsafeData[s + 2] << 8U) |
            (uint32_t)unsafeData[s + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
#endif
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to the same soft bounds masking as the rI??() methods.
     */
    ZT_INLINE uint64_t lI64(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[s] << 56U) |
            ((uint64_t)unsafeData[s + 1] << 48U) |
            ((uint64_t)unsafeData[s + 2] << 40U) |
            ((uint64_t)unsafeData[s + 3] << 32U) |
            ((uint64_t)unsafeData[s + 4] << 24U) |
            ((uint64_t)unsafeData[s + 5] << 16U) |
            ((uint64_t)unsafeData[s + 6] << 8U) |
            (uint64_t)unsafeData[s + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
    }
    /**
     * Write a byte
     *
     * @param ii Index value-result parameter (incremented by 1)
     * @param n Byte
     */
    ZT_INLINE void wI8(int &ii,const uint8_t n) noexcept
    {
        const int s = ii++;
        unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
    }

    /**
     * Write a 16-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 2)
     * @param n Integer
     */
    ZT_INLINE void wI16(int &ii,const uint16_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 8U);
        unsafeData[s + 1] = (uint8_t)n;
#else
        *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write a 32-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 4)
     * @param n Integer
     */
    ZT_INLINE void wI32(int &ii,const uint32_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 24U);
        unsafeData[s + 1] = (uint8_t)(n >> 16U);
        unsafeData[s + 2] = (uint8_t)(n >> 8U);
        unsafeData[s + 3] = (uint8_t)n;
#else
        *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write a 64-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 8)
     * @param n Integer
     */
    ZT_INLINE void wI64(int &ii,const uint64_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 56U);
        unsafeData[s + 1] = (uint8_t)(n >> 48U);
        unsafeData[s + 2] = (uint8_t)(n >> 40U);
        unsafeData[s + 3] = (uint8_t)(n >> 32U);
        unsafeData[s + 4] = (uint8_t)(n >> 24U);
        unsafeData[s + 5] = (uint8_t)(n >> 16U);
        unsafeData[s + 6] = (uint8_t)(n >> 8U);
        unsafeData[s + 7] = (uint8_t)n;
#else
        *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }
    /**
     * Write an object implementing the marshal interface
     *
     * @tparam T Object type
     * @param ii Index value-result parameter (incremented by size of object)
     * @param t Object to write
     */
    template<typename T>
    ZT_INLINE void wO(int &ii,T &t) noexcept
    {
        const int s = ii;
        if (likely((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE)) {
            int ms = t.marshal(unsafeData + s);
            if (ms > 0)
                ii += ms;
        } else {
            ii += T::marshalSizeMax(); // mark as overflowed even if we didn't do anything
        }
    }
    /**
     * Write a C-style null-terminated string (including the trailing zero)
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @param s String to write (writes an empty string if this is NULL)
     */
    ZT_INLINE void wS(int &ii,const char *s) noexcept
    {
        if (s) {
            char c;
            do {
                c = *(s++);
                wI8(ii,(uint8_t)c);
            } while (c);
        } else {
            wI8(ii,0);
        }
    }

    /**
     * Write a byte array
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param bytes Bytes to write
     * @param len Size of data in bytes
     */
    ZT_INLINE void wB(int &ii,const void *const bytes,const unsigned int len) noexcept
    {
        const int s = ii;
        if (likely((ii += (int)len) <= ZT_BUF_MEM_SIZE))
            Utils::copy(unsafeData + s,bytes,len);
    }

    /**
     * Write zeroes
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param len Number of zero bytes to write
     */
    ZT_INLINE void wZ(int &ii,const unsigned int len) noexcept
    {
        const int s = ii;
        if (likely((ii += (int)len) <= ZT_BUF_MEM_SIZE))
            Utils::zero(unsafeData + s,len);
    }

    /**
     * Write secure random bytes
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param len Number of random bytes to write
     */
    ZT_INLINE void wR(int &ii,const unsigned int len) noexcept
    {
        const int s = ii;
        if (likely((ii += (int)len) <= ZT_BUF_MEM_SIZE))
            Utils::getSecureRandom(unsafeData + s,len);
    }
    /**
     * Store a byte without advancing the index
     */
    ZT_INLINE void sI8(const int ii,const uint8_t n) noexcept
    {
        unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK] = n;
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_INLINE void sI16(const int ii,const uint16_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 8U);
        unsafeData[s + 1] = (uint8_t)n;
#else
        *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_INLINE void sI32(const int ii,const uint32_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 24U);
        unsafeData[s + 1] = (uint8_t)(n >> 16U);
        unsafeData[s + 2] = (uint8_t)(n >> 8U);
        unsafeData[s + 3] = (uint8_t)n;
#else
        *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_INLINE void sI64(const int ii,const uint64_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 56U);
        unsafeData[s + 1] = (uint8_t)(n >> 48U);
        unsafeData[s + 2] = (uint8_t)(n >> 40U);
        unsafeData[s + 3] = (uint8_t)(n >> 32U);
        unsafeData[s + 4] = (uint8_t)(n >> 24U);
        unsafeData[s + 5] = (uint8_t)(n >> 16U);
        unsafeData[s + 6] = (uint8_t)(n >> 8U);
        unsafeData[s + 7] = (uint8_t)n;
#else
        *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * @return Capacity of this buffer (usable size of unsafeData)
     */
    static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; }
private:
    // Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise
    std::atomic<uintptr_t> __nextInPool;

    // Reference counter for SharedPtr<>
    std::atomic<int> __refCount;
};

} // namespace ZeroTier

#endif