Buf.hpp 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733
  1. /*
  2. * Copyright (c)2013-2020 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2024-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #ifndef ZT_BUF_HPP
  14. #define ZT_BUF_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
#include "TriviallyCopyable.hpp"
#include "FCV.hpp"

#include <atomic>
#include <cstdint>
#include <cstring>
#include <cstdlib>
#include <stdexcept>
#include <utility>
#include <algorithm>
#include <new>
  28. // Buffers are 16384 bytes in size because this is the smallest size that can hold any packet
  29. // and is a power of two. It needs to be a power of two because masking is significantly faster
  30. // than integer division modulus.
  31. #define ZT_BUF_MEM_SIZE 0x00004000
  32. #define ZT_BUF_MEM_MASK 0x00003fffU
  33. // Sanity limit on maximum buffer pool size
  34. #define ZT_BUF_MAX_POOL_SIZE 1024
  35. namespace ZeroTier {
  36. /**
  37. * Buffer and methods for branch-free bounds-checked data assembly and parsing
  38. *
  39. * This implements an extremely fast buffer for packet assembly and parsing that avoids
  40. * branching whenever possible. To be safe it must be used correctly!
  41. *
  42. * The read methods are prefixed by 'r', and write methods with 'w'. All methods take
  43. * an iterator, which is just an int that should be initialized to 0 (or whatever starting
  44. * position is desired). All read methods will advance the iterator regardless of outcome.
  45. *
  46. * Read and write methods fail silently in the event of overflow. They do not corrupt or
  47. * access memory outside the bounds of Buf, but will otherwise produce undefined results.
  48. *
  49. * IT IS THE RESPONSIBILITY OF THE USER of this class to use the readOverflow() and
  50. * writeOverflow() static methods to check the iterator for overflow after each series
  51. * of reads and writes and BEFORE ANY PARSING or other decisions are made on the basis
  52. * of the data obtained from a buffer. Failure to do so can result in bugs due
  53. * to parsing and branching on undefined or corrupt data.
  54. *
  55. * ^^ THIS IS VERY IMPORTANT ^^
  56. *
  57. * A typical packet assembly consists of repeated calls to the write methods followed by
  58. * a check to writeOverflow() before final packet armoring and transport. A typical packet
  59. * disassembly and parsing consists of a series of read calls to obtain the packet's
  60. * fields followed by a call to readOverflow() to check that these fields are valid. The
  61. * packet is discarded if readOverflow() returns true. Some packet parsers may make
  62. * additional reads and in this case readOverflow() must be checked after each set of
  63. * reads to ensure that overflow did not occur.
  64. *
  65. * Buf uses a lock-free pool for extremely fast allocation and deallocation.
  66. *
  67. * Buf can optionally take a template parameter that will be placed in the 'data'
  68. * union as 'fields.' This must be a basic plain data type and must be no larger than
  69. * ZT_BUF_MEM_SIZE. It's typically a packed struct.
  70. *
  71. * Buf instances with different template parameters can freely be cast to one another
  72. * as there is no actual difference in size or layout.
  73. *
  74. * @tparam U Type to overlap with data bytes in data union (can't be larger than ZT_BUF_MEM_SIZE)
  75. */
  76. class Buf
  77. {
  78. friend class SharedPtr<Buf>;
  79. public:
  80. // New and delete operators that allocate Buf instances from a shared lock-free memory pool.
  81. static void *operator new(std::size_t sz);
  82. static void operator delete(void *ptr);
  83. /**
  84. * Free all instances of Buf in shared pool.
  85. *
  86. * New buffers will be created and the pool repopulated if get() is called
  87. * and outstanding buffers will still be returned to the pool. This just
  88. * frees buffers currently held in reserve.
  89. */
  90. static void freePool() noexcept;
  91. /**
  92. * @return Number of Buf objects currently allocated via pool mechanism
  93. */
  94. static long poolAllocated() noexcept;
  95. /**
  96. * Slice is almost exactly like the built-in slice data structure in Go
  97. */
  98. struct Slice : TriviallyCopyable
  99. {
  100. ZT_ALWAYS_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) noexcept : b(b_),s(s_),e(e_) {}
  101. ZT_ALWAYS_INLINE Slice() noexcept : b(),s(0),e(0) {}
  102. ZT_ALWAYS_INLINE operator bool() const noexcept { return (b); }
  103. ZT_ALWAYS_INLINE unsigned int size() const noexcept { return (e - s); }
  104. ZT_ALWAYS_INLINE void zero() noexcept { b.zero(); s = 0; e = 0; }
  105. /**
  106. * Buffer holding slice data
  107. */
  108. SharedPtr<Buf> b;
  109. /**
  110. * Index of start of data in slice
  111. */
  112. unsigned int s;
  113. /**
  114. * Index of end of data in slice (make sure it's greater than or equal to 's'!)
  115. */
  116. unsigned int e;
  117. };
  118. /**
  119. * Assemble all slices in a vector into a single slice starting at position 0
  120. *
  121. * The returned slice will start at 0 and contain the entire vector unless the
  122. * vector is too large to fit in a single buffer. If that or any other error
  123. * occurs the returned slice will be empty and contain a NULL Buf.
  124. *
  125. * The vector may be modified by this function and should be considered
  126. * undefined after it is called.
  127. *
  128. * @tparam FCVC Capacity of FCV (generally inferred automatically)
  129. * @param fcv FCV containing one or more slices
  130. * @return Single slice containing fully assembled buffer (empty on error)
  131. */
  132. template<unsigned int FCVC>
  133. static ZT_ALWAYS_INLINE Buf::Slice assembleSliceVector(FCV<Buf::Slice,FCVC> &fcv) noexcept
  134. {
  135. Buf::Slice r;
  136. typename FCV<Buf::Slice,FCVC>::iterator s(fcv.begin());
  137. unsigned int l = s->e - s->s;
  138. if (l <= ZT_BUF_MEM_SIZE) {
  139. r.b.move(s->b);
  140. if (s->s > 0)
  141. memmove(r.b->unsafeData,r.b->unsafeData + s->s,l);
  142. r.e = l;
  143. while (++s != fcv.end()) {
  144. l = s->e - s->s;
  145. if (l > (ZT_BUF_MEM_SIZE - r.e)) {
  146. r.b.zero();
  147. r.e = 0;
  148. break;
  149. }
  150. memcpy(r.b->unsafeData + r.e,s->b->unsafeData + s->s,l);
  151. s->b.zero(); // let go of buffer in vector as soon as possible
  152. r.e += l;
  153. }
  154. }
  155. return r;
  156. }
  157. /**
  158. * Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
  159. */
  160. ZT_ALWAYS_INLINE Buf() noexcept : __nextInPool(0),__refCount(0) {}
  161. /**
  162. * Create a new buffer and copy data into it
  163. */
  164. ZT_ALWAYS_INLINE Buf(const void *const data,const unsigned int len) noexcept : __nextInPool(0),__refCount(0) { memcpy(unsafeData,data,len); }
  165. ZT_ALWAYS_INLINE Buf(const Buf &b2) noexcept : __nextInPool(0),__refCount(0) { memcpy(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE); }
  166. ZT_ALWAYS_INLINE Buf &operator=(const Buf &b2) noexcept
  167. {
  168. if (this != &b2)
  169. memcpy(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE);
  170. return *this;
  171. }
  172. /**
  173. * Check for overflow beyond the size of the buffer
  174. *
  175. * This is used to check for overflow when writing. It returns true if the iterator
  176. * has passed beyond the capacity of the buffer.
  177. *
  178. * @param ii Iterator to check
  179. * @return True if iterator has read past the size of the buffer
  180. */
  181. static ZT_ALWAYS_INLINE bool writeOverflow(const int &ii) noexcept { return ((ii - ZT_BUF_MEM_SIZE) > 0); }
  182. /**
  183. * Check for overflow beyond the size of the data that should be in the buffer
  184. *
  185. * This is used to check for overflow when reading, with the second argument being the
  186. * size of the meaningful data actually present in the buffer.
  187. *
  188. * @param ii Iterator to check
  189. * @param size Size of data that should be in buffer
  190. * @return True if iterator has read past the size of the data
  191. */
  192. static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size) noexcept { return ((ii - (int)size) > 0); }
  193. /**
  194. * Set all memory to zero
  195. */
  196. ZT_ALWAYS_INLINE void clear() noexcept { memset(unsafeData,0,ZT_BUF_MEM_SIZE); }
  197. /**
  198. * Zero security critical data using Utils::burn() to ensure it's never optimized out.
  199. */
  200. ZT_ALWAYS_INLINE void burn() noexcept { Utils::burn(unsafeData,ZT_BUF_MEM_SIZE); }
  201. /**
  202. * Read a byte
  203. *
  204. * @param ii Index value-result parameter (incremented by 1)
  205. * @return Byte (undefined on overflow)
  206. */
  207. ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const noexcept
  208. {
  209. const int s = ii++;
  210. return unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK];
  211. }
  212. /**
  213. * Read a 16-bit integer
  214. *
  215. * @param ii Index value-result parameter (incremented by 2)
  216. * @return Integer (undefined on overflow)
  217. */
  218. ZT_ALWAYS_INLINE uint16_t rI16(int &ii) const noexcept
  219. {
  220. const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
  221. ii += 2;
  222. #ifdef ZT_NO_UNALIGNED_ACCESS
  223. return (
  224. ((uint16_t)data.bytes[s] << 8U) |
  225. (uint16_t)data.bytes[s + 1]);
  226. #else
  227. return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
  228. #endif
  229. }
  230. /**
  231. * Read a 32-bit integer
  232. *
  233. * @param ii Index value-result parameter (incremented by 4)
  234. * @return Integer (undefined on overflow)
  235. */
  236. ZT_ALWAYS_INLINE uint32_t rI32(int &ii) const noexcept
  237. {
  238. const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
  239. ii += 4;
  240. #ifdef ZT_NO_UNALIGNED_ACCESS
  241. return (
  242. ((uint32_t)data.bytes[s] << 24U) |
  243. ((uint32_t)data.bytes[s + 1] << 16U) |
  244. ((uint32_t)data.bytes[s + 2] << 8U) |
  245. (uint32_t)data.bytes[s + 3]);
  246. #else
  247. return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
  248. #endif
  249. }
  250. /**
  251. * Read a 64-bit integer
  252. *
  253. * @param ii Index value-result parameter (incremented by 8)
  254. * @return Integer (undefined on overflow)
  255. */
  256. ZT_ALWAYS_INLINE uint64_t rI64(int &ii) const noexcept
  257. {
  258. const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
  259. ii += 8;
  260. #ifdef ZT_NO_UNALIGNED_ACCESS
  261. return (
  262. ((uint64_t)data.bytes[s] << 56U) |
  263. ((uint64_t)data.bytes[s + 1] << 48U) |
  264. ((uint64_t)data.bytes[s + 2] << 40U) |
  265. ((uint64_t)data.bytes[s + 3] << 32U) |
  266. ((uint64_t)data.bytes[s + 4] << 24U) |
  267. ((uint64_t)data.bytes[s + 5] << 16U) |
  268. ((uint64_t)data.bytes[s + 6] << 8U) |
  269. (uint64_t)data.bytes[s + 7]);
  270. #else
  271. return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
  272. #endif
  273. }
  274. /**
  275. * Read an object supporting the marshal/unmarshal interface
  276. *
  277. * If the return value is negative the object's state is undefined. A return value of
  278. * zero typically also indicates a problem, though this may depend on the object type.
  279. *
  280. * Since objects may be invalid even if there is no overflow, it's important to check
  281. * the return value of this function in all cases and discard invalid packets as it
  282. * indicates.
  283. *
  284. * @tparam T Object type
  285. * @param ii Index value-result parameter (incremented by object's size in bytes)
  286. * @param obj Object to read
  287. * @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
  288. */
  289. template<typename T>
  290. ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const noexcept
  291. {
  292. if (ii < ZT_BUF_MEM_SIZE) {
  293. int ms = obj.unmarshal(unsafeData + ii,ZT_BUF_MEM_SIZE - ii);
  294. if (ms > 0)
  295. ii += ms;
  296. return ms;
  297. }
  298. return -1;
  299. }
  300. /**
  301. * Read a C-style string from the buffer, making a copy and advancing the iterator
  302. *
  303. * Use this if the buffer's memory may get changed between reading and processing
  304. * what is read.
  305. *
  306. * @param ii Index value-result parameter (incremented by length of string)
  307. * @param buf Buffer to receive string
  308. * @param bufSize Capacity of buffer in bytes
  309. * @return Pointer to buf or NULL on overflow or error
  310. */
  311. ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const noexcept
  312. {
  313. const char *const s = (const char *)(unsafeData + ii);
  314. const int sii = ii;
  315. while (ii < ZT_BUF_MEM_SIZE) {
  316. if (unsafeData[ii++] == 0) {
  317. memcpy(buf,s,ii - sii);
  318. return buf;
  319. }
  320. }
  321. return nullptr;
  322. }
  323. /**
  324. * Obtain a pointer to a C-style string in the buffer without copying and advance the iterator
  325. *
  326. * The iterator is advanced even if this fails and returns NULL so that readOverflow()
  327. * will indicate that an overflow occurred. As with other reads the string's contents are
  328. * undefined if readOverflow() returns true.
  329. *
  330. * This version avoids a copy and so is faster if the buffer won't be modified between
  331. * reading and processing.
  332. *
  333. * @param ii Index value-result parameter (incremented by length of string)
  334. * @return Pointer to null-terminated C-style string or NULL on overflow or error
  335. */
  336. ZT_ALWAYS_INLINE const char *rSnc(int &ii) const noexcept
  337. {
  338. const char *const s = (const char *)(unsafeData + ii);
  339. while (ii < ZT_BUF_MEM_SIZE) {
  340. if (unsafeData[ii++] == 0)
  341. return s;
  342. }
  343. return nullptr;
  344. }
  345. /**
  346. * Read a byte array from the buffer, making a copy and advancing the iterator
  347. *
  348. * Use this if the buffer's memory may get changed between reading and processing
  349. * what is read.
  350. *
  351. * @param ii Index value-result parameter (incremented by len)
  352. * @param bytes Buffer to contain data to read
  353. * @param len Length of buffer
  354. * @return Pointer to data or NULL on overflow or error
  355. */
  356. ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *const bytes,const unsigned int len) const noexcept
  357. {
  358. if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
  359. memcpy(bytes,unsafeData + ii,len);
  360. return reinterpret_cast<uint8_t *>(bytes);
  361. }
  362. return nullptr;
  363. }
  364. /**
  365. * Obtain a pointer to a field in the buffer without copying and advance the iterator
  366. *
  367. * The iterator is advanced even if this fails and returns NULL so that readOverflow()
  368. * will indicate that an overflow occurred.
  369. *
  370. * This version avoids a copy and so is faster if the buffer won't be modified between
  371. * reading and processing.
  372. *
  373. * @param ii Index value-result parameter (incremented by len)
  374. * @param len Length of data field to obtain a pointer to
  375. * @return Pointer to field or NULL on overflow
  376. */
  377. ZT_ALWAYS_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const noexcept
  378. {
  379. const uint8_t *const b = unsafeData + ii;
  380. return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
  381. }
  382. /**
  383. * Load a value at an index without advancing the index
  384. *
  385. * Note that unlike the rI??() methods this does not increment ii and therefore
  386. * will not necessarily result in a 'true' return from readOverflow(). It does
  387. * however subject 'ii' to soft bounds masking like the gI??() methods.
  388. */
  389. ZT_ALWAYS_INLINE uint8_t lI8(const int ii) const noexcept
  390. {
  391. return unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK];
  392. }
  393. /**
  394. * Load a value at an index without advancing the index
  395. *
  396. * Note that unlike the rI??() methods this does not increment ii and therefore
  397. * will not necessarily result in a 'true' return from readOverflow(). It does
  398. * however subject 'ii' to soft bounds masking like the gI??() methods.
  399. */
  400. ZT_ALWAYS_INLINE uint16_t lI16(const int ii) const noexcept
  401. {
  402. const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
  403. #ifdef ZT_NO_UNALIGNED_ACCESS
  404. return (
  405. ((uint16_t)data.bytes[s] << 8U) |
  406. (uint16_t)data.bytes[s + 1]);
  407. #else
  408. return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
  409. #endif
  410. }
  411. /**
  412. * Load a value at an index without advancing the index
  413. *
  414. * Note that unlike the rI??() methods this does not increment ii and therefore
  415. * will not necessarily result in a 'true' return from readOverflow(). It does
  416. * however subject 'ii' to soft bounds masking like the gI??() methods.
  417. */
  418. ZT_ALWAYS_INLINE uint32_t lI32(const int ii) const noexcept
  419. {
  420. const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
  421. #ifdef ZT_NO_UNALIGNED_ACCESS
  422. return (
  423. ((uint32_t)data.bytes[s] << 24U) |
  424. ((uint32_t)data.bytes[s + 1] << 16U) |
  425. ((uint32_t)data.bytes[s + 2] << 8U) |
  426. (uint32_t)data.bytes[s + 3]);
  427. #else
  428. return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
  429. #endif
  430. }
  431. /**
  432. * Load a value at an index without advancing the index
  433. *
  434. * Note that unlike the rI??() methods this does not increment ii and therefore
  435. * will not necessarily result in a 'true' return from readOverflow(). It does
  436. * however subject 'ii' to soft bounds masking like the gI??() methods.
  437. */
  438. ZT_ALWAYS_INLINE uint8_t lI64(const int ii) const noexcept
  439. {
  440. const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
  441. #ifdef ZT_NO_UNALIGNED_ACCESS
  442. return (
  443. ((uint64_t)data.bytes[s] << 56U) |
  444. ((uint64_t)data.bytes[s + 1] << 48U) |
  445. ((uint64_t)data.bytes[s + 2] << 40U) |
  446. ((uint64_t)data.bytes[s + 3] << 32U) |
  447. ((uint64_t)data.bytes[s + 4] << 24U) |
  448. ((uint64_t)data.bytes[s + 5] << 16U) |
  449. ((uint64_t)data.bytes[s + 6] << 8U) |
  450. (uint64_t)data.bytes[s + 7]);
  451. #else
  452. return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
  453. #endif
  454. }
  455. /**
  456. * Write a byte
  457. *
  458. * @param ii Index value-result parameter (incremented by 1)
  459. * @param n Byte
  460. */
  461. ZT_ALWAYS_INLINE void wI8(int &ii,const uint8_t n) noexcept
  462. {
  463. const int s = ii++;
  464. unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
  465. }
  466. /**
  467. * Write a 16-bit integer in big-endian byte order
  468. *
  469. * @param ii Index value-result parameter (incremented by 2)
  470. * @param n Integer
  471. */
  472. ZT_ALWAYS_INLINE void wI16(int &ii,const uint16_t n) noexcept
  473. {
  474. const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
  475. ii += 2;
  476. #ifdef ZT_NO_UNALIGNED_ACCESS
  477. b[s] = (uint8_t)(n >> 8U);
  478. b[s + 1] = (uint8_t)n;
  479. #else
  480. *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
  481. #endif
  482. }
  483. /**
  484. * Write a 32-bit integer in big-endian byte order
  485. *
  486. * @param ii Index value-result parameter (incremented by 4)
  487. * @param n Integer
  488. */
  489. ZT_ALWAYS_INLINE void wI32(int &ii,const uint32_t n) noexcept
  490. {
  491. const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
  492. ii += 4;
  493. #ifdef ZT_NO_UNALIGNED_ACCESS
  494. b[s] = (uint8_t)(n >> 24U);
  495. b[s + 1] = (uint8_t)(n >> 16U);
  496. b[s + 2] = (uint8_t)(n >> 8U);
  497. b[s + 3] = (uint8_t)n;
  498. #else
  499. *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
  500. #endif
  501. }
  502. /**
  503. * Write a 64-bit integer in big-endian byte order
  504. *
  505. * @param ii Index value-result parameter (incremented by 8)
  506. * @param n Integer
  507. */
  508. ZT_ALWAYS_INLINE void wI64(int &ii,const uint64_t n) noexcept
  509. {
  510. const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
  511. ii += 8;
  512. #ifdef ZT_NO_UNALIGNED_ACCESS
  513. b[s] = (uint8_t)(n >> 56U);
  514. b[s + 1] = (uint8_t)(n >> 48U);
  515. b[s + 2] = (uint8_t)(n >> 40U);
  516. b[s + 3] = (uint8_t)(n >> 32U);
  517. b[s + 4] = (uint8_t)(n >> 24U);
  518. b[s + 5] = (uint8_t)(n >> 16U);
  519. b[s + 6] = (uint8_t)(n >> 8U);
  520. b[s + 7] = (uint8_t)n;
  521. #else
  522. *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
  523. #endif
  524. }
  525. /**
  526. * Write an object implementing the marshal interface
  527. *
  528. * @tparam T Object type
  529. * @param ii Index value-result parameter (incremented by size of object)
  530. * @param t Object to write
  531. */
  532. template<typename T>
  533. ZT_ALWAYS_INLINE void wO(int &ii,T &t) noexcept
  534. {
  535. const int s = ii;
  536. if ((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE) {
  537. int ms = t.marshal(unsafeData + s);
  538. if (ms > 0)
  539. ii += ms;
  540. } else {
  541. ii += T::marshalSizeMax(); // mark as overflowed even if we didn't do anything
  542. }
  543. }
  544. /**
  545. * Write a C-style null-terminated string (including the trailing zero)
  546. *
  547. * @param ii Index value-result parameter (incremented by length of string)
  548. * @param s String to write (writes an empty string if this is NULL)
  549. */
  550. ZT_ALWAYS_INLINE void wS(int &ii,const char *s) noexcept
  551. {
  552. if (s) {
  553. char c;
  554. do {
  555. c = *(s++);
  556. wI8(ii,(uint8_t)c);
  557. } while (c);
  558. } else {
  559. wI8(ii,0);
  560. }
  561. }
  562. /**
  563. * Write a byte array
  564. *
  565. * @param ii Index value-result parameter (incremented by len)
  566. * @param bytes Bytes to write
  567. * @param len Size of data in bytes
  568. */
  569. ZT_ALWAYS_INLINE void wB(int &ii,const void *const bytes,const unsigned int len) noexcept
  570. {
  571. const int s = ii;
  572. if ((ii += (int)len) <= ZT_BUF_MEM_SIZE)
  573. memcpy(unsafeData + s,bytes,len);
  574. }
  575. /**
  576. * Store a byte without advancing the index
  577. */
  578. ZT_ALWAYS_INLINE void sI8(const int ii,const uint8_t n) noexcept
  579. {
  580. unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK] = n;
  581. }
  582. /**
  583. * Store an integer without advancing the index
  584. */
  585. ZT_ALWAYS_INLINE void sI16(const int ii,const uint16_t n) noexcept
  586. {
  587. const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
  588. #ifdef ZT_NO_UNALIGNED_ACCESS
  589. b[s] = (uint8_t)(n >> 8U);
  590. b[s + 1] = (uint8_t)n;
  591. #else
  592. *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
  593. #endif
  594. }
  595. /**
  596. * Store an integer without advancing the index
  597. */
  598. ZT_ALWAYS_INLINE void sI32(const int ii,const uint32_t n) noexcept
  599. {
  600. const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
  601. #ifdef ZT_NO_UNALIGNED_ACCESS
  602. b[s] = (uint8_t)(n >> 24U);
  603. b[s + 1] = (uint8_t)(n >> 16U);
  604. b[s + 2] = (uint8_t)(n >> 8U);
  605. b[s + 3] = (uint8_t)n;
  606. #else
  607. *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
  608. #endif
  609. }
  610. /**
  611. * Store an integer without advancing the index
  612. */
  613. ZT_ALWAYS_INLINE void sI64(const int ii,const uint64_t n) noexcept
  614. {
  615. const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
  616. #ifdef ZT_NO_UNALIGNED_ACCESS
  617. b[s] = (uint8_t)(n >> 56U);
  618. b[s + 1] = (uint8_t)(n >> 48U);
  619. b[s + 2] = (uint8_t)(n >> 40U);
  620. b[s + 3] = (uint8_t)(n >> 32U);
  621. b[s + 4] = (uint8_t)(n >> 24U);
  622. b[s + 5] = (uint8_t)(n >> 16U);
  623. b[s + 6] = (uint8_t)(n >> 8U);
  624. b[s + 7] = (uint8_t)n;
  625. #else
  626. *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
  627. #endif
  628. }
  629. /**
  630. * @return Capacity of this buffer (usable size of data.bytes)
  631. */
  632. static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; }
  633. /**
  634. * Cast data in 'b' to a (usually packed) structure type
  635. *
  636. * Warning: this does no bounds checking. It should only be used with packed
  637. * struct types designed for use in packet decoding such as those in
  638. * Protocol.hpp, and if 'i' is non-zero the caller must check bounds.
  639. *
  640. * @tparam T Structure type to cast 'b' to
  641. * @param i Index of start of structure (default: 0)
  642. * @return Reference to 'b' cast to type T
  643. */
  644. template<typename T>
  645. ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) noexcept { return *reinterpret_cast<T *>(unsafeData + i); }
  646. /**
  647. * Cast data in 'b' to a (usually packed) structure type (const)
  648. *
  649. * Warning: this does no bounds checking. It should only be used with packed
  650. * struct types designed for use in packet decoding such as those in
  651. * Protocol.hpp, and if 'i' is non-zero the caller must check bounds.
  652. *
  653. * @tparam T Structure type to cast 'b' to
  654. * @param i Index of start of structure (default: 0)
  655. * @return Reference to 'b' cast to type T
  656. */
  657. template<typename T>
  658. ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const noexcept { return *reinterpret_cast<const T *>(unsafeData + i); }
  659. /**
  660. * Raw data held in buffer
  661. *
  662. * The additional eight bytes should not be used and should be considered undefined.
  663. * They exist to allow reads and writes of integer types to silently overflow if a
  664. * read or write is performed at the end of the buffer.
  665. */
  666. uint8_t unsafeData[ZT_BUF_MEM_SIZE + 8];
  667. private:
  668. // Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise
  669. std::atomic<uintptr_t> __nextInPool;
  670. // Reference counter for SharedPtr<>
  671. std::atomic<int> __refCount;
  672. };
  673. } // namespace ZeroTier
  674. #endif