/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#ifndef ZT_BUF_HPP
#define ZT_BUF_HPP

#include "Constants.hpp"
#include "Utils.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
#include "TriviallyCopyable.hpp"
#include "FCV.hpp"

#include <cstdint>
#include <cstring>
#include <cstdlib>
#include <stdexcept>
#include <utility>
#include <algorithm>
#include <new>
#include <atomic> // std::atomic is used for the pool free list and reference count below

// Buffers are 16384 bytes in size because this is the smallest size that can hold any
// packet and is a power of two. It needs to be a power of two because masking is
// significantly faster than integer modulo.
#define ZT_BUF_MEM_SIZE 0x00004000
#define ZT_BUF_MEM_MASK 0x00003fffU

// Sanity limit on maximum buffer pool size
#define ZT_BUF_MAX_POOL_SIZE 1024

namespace ZeroTier {

/**
 * Buffer and methods for branch-free bounds-checked data assembly and parsing
 *
 * This implements an extremely fast buffer for packet assembly and parsing that avoids
 * branching whenever possible. To be safe it must be used correctly!
 *
 * Read methods are prefixed with 'r' and write methods with 'w'. All methods take
 * an iterator, which is just an int that should be initialized to 0 (or to whatever
 * starting position is desired). All read and write methods advance the iterator
 * regardless of outcome.
 *
 * Read and write methods fail silently in the event of overflow. They do not corrupt or
 * access memory outside the bounds of Buf, but will otherwise produce undefined results.
 *
 * IT IS THE RESPONSIBILITY OF THE USER of this class to use the readOverflow() and
 * writeOverflow() static methods to check the iterator for overflow after each series
 * of reads and writes and BEFORE ANY PARSING or other decisions are made on the basis
 * of the data obtained from a buffer. Failure to do so can result in bugs due
 * to parsing and branching on undefined or corrupt data.
 *
 * ^^ THIS IS VERY IMPORTANT ^^
 *
 * A typical packet assembly consists of repeated calls to the write methods followed by
 * a check of writeOverflow() before final packet armoring and transport. A typical packet
 * disassembly and parsing consists of a series of read calls to obtain the packet's
 * fields followed by a call to readOverflow() to check that these fields are valid. The
 * packet is discarded if readOverflow() returns true. Some packet parsers may make
 * additional reads, and in this case readOverflow() must be checked after each set of
 * reads to ensure that overflow did not occur.
 *
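 * A minimal illustrative sketch of that pattern follows. The field names and the
 * 'packetSize' variable are assumptions made for the example, not part of this class:
 *
 *   Buf b;
 *   int wi = 0;                          // write iterator starts at 0
 *   b.wI8(wi, 0x01);                     // packet fields, written in order
 *   b.wI32(wi, 0x12345678);
 *   if (Buf::writeOverflow(wi)) {
 *     // discard: the assembled packet did not fit in the buffer
 *   }
 *
 *   int ri = 0;                          // read iterator starts at 0
 *   const uint8_t type = b.rI8(ri);
 *   const uint32_t value = b.rI32(ri);
 *   if (Buf::readOverflow(ri, packetSize)) {
 *     // discard: fields beyond 'packetSize' bytes are undefined
 *   }
 *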
 * Buf uses a lock-free pool for extremely fast allocation and deallocation.
 *
 * Raw data is accessed directly via the public unsafeData array or reinterpreted
 * as a packed structure type via as<T>(), which must be no larger than
 * ZT_BUF_MEM_SIZE.
 */
class Buf
{
    friend class SharedPtr<Buf>;

public:
    // New and delete operators that allocate Buf instances from a shared lock-free memory pool.
    static void *operator new(std::size_t sz);
    static void operator delete(void *ptr);

    /**
     * Free all instances of Buf held in the shared pool.
     *
     * The pool will be repopulated as new buffers are allocated, and outstanding
     * buffers will still be returned to the pool when they are freed. This just
     * frees buffers currently held in reserve.
     */
    static void freePool() noexcept;

    /**
     * @return Number of Buf objects currently allocated via the pool mechanism
     */
    static long poolAllocated() noexcept;
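
    // Illustrative sketch of pool use (assumes SharedPtr can be constructed from a
    // raw pointer, as is typical; not something this header defines): buffers are
    // obtained with operator new, which draws from the lock-free pool, and returned
    // to it by operator delete when the last reference is dropped.
    //
    //   SharedPtr<Buf> b(new Buf());   // allocated from (or added to) the pool
    //   b.zero();                      // last reference released -> returned to pool
    //   Buf::freePool();               // optionally release buffers held in reserve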

    /**
     * Slice is almost exactly like the built-in slice data structure in Go
     */
    struct Slice : TriviallyCopyable
    {
        ZT_INLINE Slice(const SharedPtr<Buf> &b_, const unsigned int s_, const unsigned int e_) noexcept : b(b_), s(s_), e(e_) {}

        ZT_INLINE Slice() noexcept : b(), s(0), e(0) {}

        ZT_INLINE operator bool() const noexcept { return (b); }

        ZT_INLINE unsigned int size() const noexcept { return (e - s); }

        ZT_INLINE void zero() noexcept { b.zero(); s = 0; e = 0; }

        /**
         * Buffer holding slice data
         */
        SharedPtr<Buf> b;

        /**
         * Index of start of data in slice
         */
        unsigned int s;

        /**
         * Index of end of data in slice (make sure it's greater than or equal to 's'!)
         */
        unsigned int e;
    };

    /**
     * Assemble all slices in a vector into a single slice starting at position 0
     *
     * The returned slice will start at 0 and contain the entire vector unless the
     * vector is too large to fit in a single buffer. If that or any other error
     * occurs the returned slice will be empty and contain a NULL Buf.
     *
     * The vector may be modified by this function and should be considered
     * undefined after it is called.
     *
     * @tparam FCVC Capacity of FCV (generally inferred automatically)
     * @param fcv FCV containing one or more slices
     * @return Single slice containing fully assembled buffer (empty on error)
     */
    template<unsigned int FCVC>
    static ZT_INLINE Buf::Slice assembleSliceVector(FCV<Buf::Slice, FCVC> &fcv) noexcept
    {
        Buf::Slice r;

        typename FCV<Buf::Slice, FCVC>::iterator s(fcv.begin());
        unsigned int l = s->e - s->s;
        if (l <= ZT_BUF_MEM_SIZE) {
            r.b.move(s->b);
            if (s->s > 0)
                memmove(r.b->unsafeData, r.b->unsafeData + s->s, l);
            r.e = l;

            while (++s != fcv.end()) {
                l = s->e - s->s;
                if (l > (ZT_BUF_MEM_SIZE - r.e)) {
                    r.b.zero();
                    r.e = 0;
                    break;
                }
                Utils::copy(r.b->unsafeData + r.e, s->b->unsafeData + s->s, l);
                s->b.zero(); // let go of buffer in vector as soon as possible
                r.e += l;
            }
        }

        return r;
    }
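
    // Illustrative sketch of slice assembly. The slice boundaries, the buffers
    // 'firstBuf'/'secondBuf', and the assumption that FCV exposes a vector-like
    // push_back() are all part of the example, not guarantees of this header:
    //
    //   FCV<Buf::Slice, 32> fragments;
    //   fragments.push_back(Buf::Slice(firstBuf, 16, 1400));  // skip a 16 byte header
    //   fragments.push_back(Buf::Slice(secondBuf, 16, 900));
    //   Buf::Slice whole = Buf::assembleSliceVector(fragments);
    //   if (!whole)
    //     return; // too large or otherwise invalid; whole.b is a NULL Buf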

    /**
     * Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
     */
    ZT_INLINE Buf() noexcept : __nextInPool(0), __refCount(0) {}

    /**
     * Create a new buffer and copy data into it
     */
    ZT_INLINE Buf(const void *const data, const unsigned int len) noexcept :
        __nextInPool(0),
        __refCount(0)
    {
        Utils::copy(unsafeData, data, len);
    }

    ZT_INLINE Buf(const Buf &b2) noexcept :
        __nextInPool(0),
        __refCount(0)
    {
        Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData, b2.unsafeData);
    }

    ZT_INLINE Buf &operator=(const Buf &b2) noexcept
    {
        if (this != &b2)
            Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData, b2.unsafeData);
        return *this;
    }

    /**
     * Check for overflow beyond the size of the buffer
     *
     * This is used to check for overflow when writing. It returns true if the iterator
     * has passed beyond the capacity of the buffer.
     *
     * @param ii Iterator to check
     * @return True if iterator has written past the capacity of the buffer
     */
    static ZT_INLINE bool writeOverflow(const int &ii) noexcept { return ((ii - ZT_BUF_MEM_SIZE) > 0); }

    /**
     * Check for overflow beyond the size of the data that should be in the buffer
     *
     * This is used to check for overflow when reading, with the second argument being the
     * size of the meaningful data actually present in the buffer.
     *
     * @param ii Iterator to check
     * @param size Size of data that should be in buffer
     * @return True if iterator has read past the size of the data
     */
    static ZT_INLINE bool readOverflow(const int &ii, const unsigned int size) noexcept { return ((ii - (int)size) > 0); }

    /**
     * Set all memory to zero
     */
    ZT_INLINE void clear() noexcept
    {
        Utils::zero<ZT_BUF_MEM_SIZE>(unsafeData);
    }

    /**
     * Zero security critical data using Utils::burn() to ensure it's never optimized out.
     */
    ZT_INLINE void burn() noexcept
    {
        Utils::burn(unsafeData, ZT_BUF_MEM_SIZE);
    }

    /**
     * Read a byte
     *
     * @param ii Index value-result parameter (incremented by 1)
     * @return Byte (undefined on overflow)
     */
    ZT_INLINE uint8_t rI8(int &ii) const noexcept
    {
        const int s = ii++;
        return unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK];
    }

    /**
     * Read a 16-bit integer
     *
     * @param ii Index value-result parameter (incremented by 2)
     * @return Integer (undefined on overflow)
     */
    ZT_INLINE uint16_t rI16(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[s] << 8U) |
            (uint16_t)unsafeData[s + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
#endif
    }

    /**
     * Read a 32-bit integer
     *
     * @param ii Index value-result parameter (incremented by 4)
     * @return Integer (undefined on overflow)
     */
    ZT_INLINE uint32_t rI32(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[s] << 24U) |
            ((uint32_t)unsafeData[s + 1] << 16U) |
            ((uint32_t)unsafeData[s + 2] << 8U) |
            (uint32_t)unsafeData[s + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
#endif
    }

    /**
     * Read a 64-bit integer
     *
     * @param ii Index value-result parameter (incremented by 8)
     * @return Integer (undefined on overflow)
     */
    ZT_INLINE uint64_t rI64(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[s] << 56U) |
            ((uint64_t)unsafeData[s + 1] << 48U) |
            ((uint64_t)unsafeData[s + 2] << 40U) |
            ((uint64_t)unsafeData[s + 3] << 32U) |
            ((uint64_t)unsafeData[s + 4] << 24U) |
            ((uint64_t)unsafeData[s + 5] << 16U) |
            ((uint64_t)unsafeData[s + 6] << 8U) |
            (uint64_t)unsafeData[s + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
    }

    /**
     * Read an object supporting the marshal/unmarshal interface
     *
     * If the return value is negative the object's state is undefined. A return value of
     * zero typically also indicates a problem, though this may depend on the object type.
     *
     * Since objects may be invalid even if there is no overflow, it's important to check
     * the return value of this function in all cases and discard invalid packets as it
     * indicates.
     *
     * @tparam T Object type
     * @param ii Index value-result parameter (incremented by object's size in bytes)
     * @param obj Object to read
     * @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
     */
    template<typename T>
    ZT_INLINE int rO(int &ii, T &obj) const noexcept
    {
        if (ii < ZT_BUF_MEM_SIZE) {
            const int ms = obj.unmarshal(unsafeData + ii, ZT_BUF_MEM_SIZE - ii);
            if (ms > 0)
                ii += ms;
            return ms;
        }
        return -1;
    }
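
    // Illustrative sketch of reading a marshalable object. "MyObject" is a
    // hypothetical type standing in for anything that implements unmarshal()
    // as described above; 'ri' and 'packetSize' are assumptions of the example:
    //
    //   MyObject obj;
    //   const int consumed = buf.rO(ri, obj);
    //   if ((consumed <= 0) || Buf::readOverflow(ri, packetSize))
    //     return; // invalid object, unmarshal error, or truncated packet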

    /**
     * Read a C-style string from the buffer, making a copy and advancing the iterator
     *
     * Use this if the buffer's memory may get changed between reading and processing
     * what is read.
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @param buf Buffer to receive string
     * @param bufSize Capacity of buffer in bytes
     * @return Pointer to buf or NULL on overflow or error
     */
    ZT_INLINE char *rS(int &ii, char *const buf, const unsigned int bufSize) const noexcept
    {
        const char *const s = (const char *)(unsafeData + ii);
        const int sii = ii;
        while (ii < ZT_BUF_MEM_SIZE) {
            if (unsafeData[ii++] == 0) {
                const unsigned int l = (unsigned int)(ii - sii); // length including terminating null
                if (l > bufSize)
                    return nullptr; // string does not fit in the destination buffer
                Utils::copy(buf, s, l);
                return buf;
            }
        }
        return nullptr;
    }

    /**
     * Obtain a pointer to a C-style string in the buffer without copying and advance the iterator
     *
     * The iterator is advanced even if this fails and returns NULL so that readOverflow()
     * will indicate that an overflow occurred. As with other reads the string's contents are
     * undefined if readOverflow() returns true.
     *
     * This version avoids a copy and so is faster if the buffer won't be modified between
     * reading and processing.
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @return Pointer to null-terminated C-style string or NULL on overflow or error
     */
    ZT_INLINE const char *rSnc(int &ii) const noexcept
    {
        const char *const s = (const char *)(unsafeData + ii);
        while (ii < ZT_BUF_MEM_SIZE) {
            if (unsafeData[ii++] == 0)
                return s;
        }
        return nullptr;
    }

    /**
     * Read a byte array from the buffer, making a copy and advancing the iterator
     *
     * Use this if the buffer's memory may get changed between reading and processing
     * what is read.
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param bytes Buffer to contain data to read
     * @param len Length of buffer
     * @return Pointer to data or NULL on overflow or error
     */
    ZT_INLINE uint8_t *rB(int &ii, void *const bytes, const unsigned int len) const noexcept
    {
        const int s = ii; // copy must start at the pre-advance position
        if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
            Utils::copy(bytes, unsafeData + s, len);
            return reinterpret_cast<uint8_t *>(bytes);
        }
        return nullptr;
    }

    /**
     * Obtain a pointer to a field in the buffer without copying and advance the iterator
     *
     * The iterator is advanced even if this fails and returns NULL so that readOverflow()
     * will indicate that an overflow occurred.
     *
     * This version avoids a copy and so is faster if the buffer won't be modified between
     * reading and processing.
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param len Length of data field to obtain a pointer to
     * @return Pointer to field or NULL on overflow
     */
    ZT_INLINE const uint8_t *rBnc(int &ii, unsigned int len) const noexcept
    {
        const uint8_t *const b = unsafeData + ii;
        return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to soft bounds masking like the rI??() methods.
     */
    ZT_INLINE uint8_t lI8(const int ii) const noexcept
    {
        return unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK];
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to soft bounds masking like the rI??() methods.
     */
    ZT_INLINE uint16_t lI16(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[s] << 8U) |
            (uint16_t)unsafeData[s + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
#endif
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to soft bounds masking like the rI??() methods.
     */
    ZT_INLINE uint32_t lI32(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[s] << 24U) |
            ((uint32_t)unsafeData[s + 1] << 16U) |
            ((uint32_t)unsafeData[s + 2] << 8U) |
            (uint32_t)unsafeData[s + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
#endif
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to soft bounds masking like the rI??() methods.
     */
    ZT_INLINE uint64_t lI64(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[s] << 56U) |
            ((uint64_t)unsafeData[s + 1] << 48U) |
            ((uint64_t)unsafeData[s + 2] << 40U) |
            ((uint64_t)unsafeData[s + 3] << 32U) |
            ((uint64_t)unsafeData[s + 4] << 24U) |
            ((uint64_t)unsafeData[s + 5] << 16U) |
            ((uint64_t)unsafeData[s + 6] << 8U) |
            (uint64_t)unsafeData[s + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
    }

    /**
     * Write a byte
     *
     * @param ii Index value-result parameter (incremented by 1)
     * @param n Byte
     */
    ZT_INLINE void wI8(int &ii, const uint8_t n) noexcept
    {
        const int s = ii++;
        unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
    }

    /**
     * Write a 16-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 2)
     * @param n Integer
     */
    ZT_INLINE void wI16(int &ii, const uint16_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 8U);
        unsafeData[s + 1] = (uint8_t)n;
#else
        *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write a 32-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 4)
     * @param n Integer
     */
    ZT_INLINE void wI32(int &ii, const uint32_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 24U);
        unsafeData[s + 1] = (uint8_t)(n >> 16U);
        unsafeData[s + 2] = (uint8_t)(n >> 8U);
        unsafeData[s + 3] = (uint8_t)n;
#else
        *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write a 64-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 8)
     * @param n Integer
     */
    ZT_INLINE void wI64(int &ii, const uint64_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 56U);
        unsafeData[s + 1] = (uint8_t)(n >> 48U);
        unsafeData[s + 2] = (uint8_t)(n >> 40U);
        unsafeData[s + 3] = (uint8_t)(n >> 32U);
        unsafeData[s + 4] = (uint8_t)(n >> 24U);
        unsafeData[s + 5] = (uint8_t)(n >> 16U);
        unsafeData[s + 6] = (uint8_t)(n >> 8U);
        unsafeData[s + 7] = (uint8_t)n;
#else
        *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write an object implementing the marshal interface
     *
     * @tparam T Object type
     * @param ii Index value-result parameter (incremented by size of object)
     * @param t Object to write
     */
    template<typename T>
    ZT_INLINE void wO(int &ii, T &t) noexcept
    {
        const int s = ii;
        if ((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE) {
            const int ms = t.marshal(unsafeData + s);
            if (ms > 0)
                ii += ms;
        } else {
            ii += T::marshalSizeMax(); // mark as overflowed even if we didn't do anything
        }
    }
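
    // Illustrative sketch of writing a marshalable object. "MyObject" is a
    // hypothetical type standing in for anything that provides marshal() and
    // marshalSizeMax(); 'wi' is the write iterator from the example above:
    //
    //   MyObject obj;
    //   buf.wO(wi, obj);                 // advances wi by the marshaled size
    //   if (Buf::writeOverflow(wi))
    //     return; // object did not fit; discard the packet being assembled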

    /**
     * Write a C-style null-terminated string (including the trailing zero)
     *
     * @param ii Index value-result parameter (incremented by length of string plus one for the terminator)
     * @param s String to write (writes an empty string if this is NULL)
     */
    ZT_INLINE void wS(int &ii, const char *s) noexcept
    {
        if (s) {
            char c;
            do {
                c = *(s++);
                wI8(ii, (uint8_t)c);
            } while (c);
        } else {
            wI8(ii, 0);
        }
    }

    /**
     * Write a byte array
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param bytes Bytes to write
     * @param len Size of data in bytes
     */
    ZT_INLINE void wB(int &ii, const void *const bytes, const unsigned int len) noexcept
    {
        const int s = ii;
        if ((ii += (int)len) <= ZT_BUF_MEM_SIZE)
            Utils::copy(unsafeData + s, bytes, len);
    }

    /**
     * Store a byte without advancing the index
     */
    ZT_INLINE void sI8(const int ii, const uint8_t n) noexcept
    {
        unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK] = n;
    }

    /**
     * Store a 16-bit integer without advancing the index
     */
    ZT_INLINE void sI16(const int ii, const uint16_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 8U);
        unsafeData[s + 1] = (uint8_t)n;
#else
        *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Store a 32-bit integer without advancing the index
     */
    ZT_INLINE void sI32(const int ii, const uint32_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 24U);
        unsafeData[s + 1] = (uint8_t)(n >> 16U);
        unsafeData[s + 2] = (uint8_t)(n >> 8U);
        unsafeData[s + 3] = (uint8_t)n;
#else
        *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Store a 64-bit integer without advancing the index
     */
    ZT_INLINE void sI64(const int ii, const uint64_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 56U);
        unsafeData[s + 1] = (uint8_t)(n >> 48U);
        unsafeData[s + 2] = (uint8_t)(n >> 40U);
        unsafeData[s + 3] = (uint8_t)(n >> 32U);
        unsafeData[s + 4] = (uint8_t)(n >> 24U);
        unsafeData[s + 5] = (uint8_t)(n >> 16U);
        unsafeData[s + 6] = (uint8_t)(n >> 8U);
        unsafeData[s + 7] = (uint8_t)n;
#else
        *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * @return Capacity of this buffer (usable size of unsafeData)
     */
    static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; }

    /**
     * Cast the data in this buffer to a (usually packed) structure type
     *
     * Warning: this does no bounds checking. It should only be used with packed
     * struct types designed for use in packet decoding such as those in
     * Protocol.hpp, and if 'i' is non-zero the caller must check bounds.
     *
     * @tparam T Structure type to cast the data to
     * @param i Index of start of structure (default: 0)
     * @return Reference to the buffer's data cast to type T
     */
    template<typename T>
    ZT_INLINE T &as(const unsigned int i = 0) noexcept { return *reinterpret_cast<T *>(unsafeData + i); }

    /**
     * Cast the data in this buffer to a (usually packed) structure type (const)
     *
     * Warning: this does no bounds checking. It should only be used with packed
     * struct types designed for use in packet decoding such as those in
     * Protocol.hpp, and if 'i' is non-zero the caller must check bounds.
     *
     * @tparam T Structure type to cast the data to
     * @param i Index of start of structure (default: 0)
     * @return Reference to the buffer's data cast to type T
     */
    template<typename T>
    ZT_INLINE const T &as(const unsigned int i = 0) const noexcept { return *reinterpret_cast<const T *>(unsafeData + i); }
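
    // Illustrative sketch of as<T>() with a hypothetical header type. In practice
    // the packed structs in Protocol.hpp would be used; "ExampleHeader" exists
    // only for this example and would need to be packed in real use:
    //
    //   struct ExampleHeader { uint64_t packetId; uint8_t verb; };
    //   ExampleHeader &h = buf.as<ExampleHeader>();
    //   // caller must first verify that at least sizeof(ExampleHeader) bytes are present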

    /**
     * Raw data held in buffer
     *
     * The additional eight bytes should not be used and should be considered undefined.
     * They exist to allow reads and writes of integer types to silently overflow if a
     * read or write is performed at the end of the buffer.
     */
    uint8_t unsafeData[ZT_BUF_MEM_SIZE + 8];

private:
    // Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise
    std::atomic<uintptr_t> __nextInPool;

    // Reference counter for SharedPtr<>
    std::atomic<int> __refCount;
};

} // namespace ZeroTier

#endif