Buf.hpp

/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#ifndef ZT_BUF_HPP
#define ZT_BUF_HPP

#include "Constants.hpp"
#include "Utils.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
#include "TriviallyCopyable.hpp"
#include "FCV.hpp"

#include <cstdint>
#include <cstring>
#include <cstdlib>
#include <stdexcept>
#include <utility>
#include <algorithm>
#include <new>
#include <atomic> // required for the std::atomic members declared below

// Buffers are 16384 bytes in size because this is the smallest power of two that can
// hold any packet. It needs to be a power of two because masking is significantly
// faster than integer modulus.
#define ZT_BUF_MEM_SIZE 0x00004000
#define ZT_BUF_MEM_MASK 0x00003fffU

// Sanity limit on maximum buffer pool size
#define ZT_BUF_MAX_POOL_SIZE 1024

namespace ZeroTier {
/**
 * Buffer and methods for branch-free bounds-checked data assembly and parsing
 *
 * This implements an extremely fast buffer for packet assembly and parsing that avoids
 * branching whenever possible. To be safe it must be used correctly!
 *
 * The read methods are prefixed with 'r' and the write methods with 'w'. All methods take
 * an iterator, which is just an int that should be initialized to 0 (or whatever starting
 * position is desired). All read methods will advance the iterator regardless of outcome.
 *
 * Read and write methods fail silently in the event of overflow. They do not corrupt or
 * access memory outside the bounds of Buf, but will otherwise produce undefined results.
 *
 * IT IS THE RESPONSIBILITY OF THE USER of this class to use the readOverflow() and
 * writeOverflow() static methods to check the iterator for overflow after each series
 * of reads and writes and BEFORE ANY PARSING or other decisions are made on the basis
 * of the data obtained from a buffer. Failure to do so can result in bugs due
 * to parsing and branching on undefined or corrupt data.
 *
 * ^^ THIS IS VERY IMPORTANT ^^
 *
 * A typical packet assembly consists of repeated calls to the write methods followed by
 * a check of writeOverflow() before final packet armoring and transport. A typical packet
 * disassembly and parsing consists of a series of read calls to obtain the packet's
 * fields followed by a call to readOverflow() to check that these fields are valid. The
 * packet is discarded if readOverflow() returns true. Some packet parsers may make
 * additional reads, in which case readOverflow() must be checked after each set of
 * reads to ensure that overflow did not occur.
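 *
 * A minimal sketch of that pattern (illustrative only; the field layout and the
 * someValue/packetSize variables below are hypothetical, not an actual ZeroTier
 * packet format):
 *
 *   int io = 0;
 *   buf.wI8(io, 0x01);            // hypothetical verb/type byte
 *   buf.wI32(io, someValue);      // hypothetical 32-bit field
 *   if (Buf::writeOverflow(io))
 *     return false;               // too large, do not send
 *
 *   int ri = 0;
 *   const uint8_t verb = buf.rI8(ri);
 *   const uint32_t value = buf.rI32(ri);
 *   if (Buf::readOverflow(ri, packetSize))
 *     return false;               // truncated or malformed, discard before using fields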
 *
 * Buf uses a lock-free pool for extremely fast allocation and deallocation.
 *
 * To overlay a (usually packed) structure onto the buffer's raw bytes, use the as<T>()
 * methods. The overlaid type must be plain data and no larger than ZT_BUF_MEM_SIZE;
 * it's typically a packed struct such as those in Protocol.hpp.
 */
class Buf
{
    friend class SharedPtr<Buf>;

public:
    // New and delete operators that allocate Buf instances from a shared lock-free memory pool.
    static void *operator new(std::size_t sz);
    static void operator delete(void *ptr);

    /**
     * Free all instances of Buf in shared pool.
     *
     * New buffers will be created and the pool repopulated if get() is called,
     * and outstanding buffers will still be returned to the pool. This just
     * frees buffers currently held in reserve.
     */
    static void freePool() noexcept;

    /**
     * @return Number of Buf objects currently allocated via pool mechanism
     */
    static long poolAllocated() noexcept;

    /**
     * Slice is almost exactly like the built-in slice data structure in Go
     */
    struct Slice : TriviallyCopyable
    {
        ZT_ALWAYS_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) noexcept : b(b_),s(s_),e(e_) {}
        ZT_ALWAYS_INLINE Slice() noexcept : b(),s(0),e(0) {}

        ZT_ALWAYS_INLINE operator bool() const noexcept { return (b); }
        ZT_ALWAYS_INLINE unsigned int size() const noexcept { return (e - s); }
        ZT_ALWAYS_INLINE void zero() noexcept { b.zero(); s = 0; e = 0; }

        /**
         * Buffer holding slice data
         */
        SharedPtr<Buf> b;

        /**
         * Index of start of data in slice
         */
        unsigned int s;

        /**
         * Index of end of data in slice (make sure it's greater than or equal to 's'!)
         */
        unsigned int e;
    };
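    // A minimal sketch of how a Slice is typically used (illustrative only; headerSize
    // and payloadSize are hypothetical): like a Go slice, it refers to the sub-range
    // [s, e) of an underlying pooled Buf without copying it.
    //
    //   SharedPtr<Buf> packet(new Buf());
    //   // ... fill packet->unsafeData with headerSize + payloadSize bytes ...
    //   Buf::Slice payload(packet, headerSize, headerSize + payloadSize);
    //   unsigned int bytes = payload.size(); // payloadSize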
    /**
     * Assemble all slices in a vector into a single slice starting at position 0
     *
     * The returned slice will start at 0 and contain the entire vector unless the
     * vector is too large to fit in a single buffer. If that or any other error
     * occurs the returned slice will be empty and contain a NULL Buf.
     *
     * The vector may be modified by this function and should be considered
     * undefined after it is called.
     *
     * @tparam FCVC Capacity of FCV (generally inferred automatically)
     * @param fcv FCV containing one or more slices
     * @return Single slice containing fully assembled buffer (empty on error)
     */
    template<unsigned int FCVC>
    static ZT_ALWAYS_INLINE Buf::Slice assembleSliceVector(FCV<Buf::Slice,FCVC> &fcv) noexcept
    {
        Buf::Slice r;

        typename FCV<Buf::Slice,FCVC>::iterator s(fcv.begin());
        unsigned int l = s->e - s->s;
        if (l <= ZT_BUF_MEM_SIZE) {
            r.b.move(s->b);
            if (s->s > 0)
                memmove(r.b->unsafeData,r.b->unsafeData + s->s,l);
            r.e = l;

            while (++s != fcv.end()) {
                l = s->e - s->s;
                if (l > (ZT_BUF_MEM_SIZE - r.e)) {
                    r.b.zero();
                    r.e = 0;
                    break;
                }
                memcpy(r.b->unsafeData + r.e,s->b->unsafeData + s->s,l);
                s->b.zero(); // let go of buffer in vector as soon as possible
                r.e += l;
            }
        }
        return r;
    }
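    // A minimal usage sketch for assembleSliceVector() (illustrative only; the capacity
    // of 8, the fragment slices, and FCV's push_back() usage are assumptions here):
    //
    //   FCV<Buf::Slice,8> fragments;
    //   fragments.push_back(fragment0);  // each fragment is a Buf::Slice as received
    //   fragments.push_back(fragment1);
    //   Buf::Slice whole = Buf::assembleSliceVector(fragments);
    //   if (!whole)
    //     return;                        // too large or otherwise invalid, discard
    //   // whole.b now holds all fragment data contiguously in [0, whole.e)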
    /**
     * Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
     */
    ZT_ALWAYS_INLINE Buf() noexcept {}

    ZT_ALWAYS_INLINE Buf(const Buf &b2) noexcept { memcpy(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE); }

    ZT_ALWAYS_INLINE Buf &operator=(const Buf &b2) noexcept
    {
        if (this != &b2)
            memcpy(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE);
        return *this;
    }

    /**
     * Check for overflow beyond the size of the buffer
     *
     * This is used to check for overflow when writing. It returns true if the iterator
     * has passed beyond the capacity of the buffer.
     *
     * @param ii Iterator to check
     * @return True if iterator has written past the capacity of the buffer
     */
    static ZT_ALWAYS_INLINE bool writeOverflow(const int &ii) noexcept { return ((ii - ZT_BUF_MEM_SIZE) > 0); }

    /**
     * Check for overflow beyond the size of the data that should be in the buffer
     *
     * This is used to check for overflow when reading, with the second argument being the
     * size of the meaningful data actually present in the buffer.
     *
     * @param ii Iterator to check
     * @param size Size of data that should be in buffer
     * @return True if iterator has read past the size of the data
     */
    static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size) noexcept { return ((ii - (int)size) > 0); }

    /**
     * Set all memory to zero
     */
    ZT_ALWAYS_INLINE void clear() noexcept { memset(unsafeData,0,ZT_BUF_MEM_SIZE); }

    /**
     * Zero security-critical data using Utils::burn() to ensure it's never optimized out.
     */
    ZT_ALWAYS_INLINE void burn() noexcept { Utils::burn(unsafeData,ZT_BUF_MEM_SIZE); }

    /**
     * Read a byte
     *
     * @param ii Index value-result parameter (incremented by 1)
     * @return Byte (undefined on overflow)
     */
    ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const noexcept
    {
        const int s = ii++;
        return unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK];
    }

    /**
     * Read a 16-bit integer
     *
     * @param ii Index value-result parameter (incremented by 2)
     * @return Integer (undefined on overflow)
     */
    ZT_ALWAYS_INLINE uint16_t rI16(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[s] << 8U) |
            (uint16_t)unsafeData[s + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
#endif
    }

    /**
     * Read a 32-bit integer
     *
     * @param ii Index value-result parameter (incremented by 4)
     * @return Integer (undefined on overflow)
     */
    ZT_ALWAYS_INLINE uint32_t rI32(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[s] << 24U) |
            ((uint32_t)unsafeData[s + 1] << 16U) |
            ((uint32_t)unsafeData[s + 2] << 8U) |
            (uint32_t)unsafeData[s + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
#endif
    }

    /**
     * Read a 64-bit integer
     *
     * @param ii Index value-result parameter (incremented by 8)
     * @return Integer (undefined on overflow)
     */
    ZT_ALWAYS_INLINE uint64_t rI64(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[s] << 56U) |
            ((uint64_t)unsafeData[s + 1] << 48U) |
            ((uint64_t)unsafeData[s + 2] << 40U) |
            ((uint64_t)unsafeData[s + 3] << 32U) |
            ((uint64_t)unsafeData[s + 4] << 24U) |
            ((uint64_t)unsafeData[s + 5] << 16U) |
            ((uint64_t)unsafeData[s + 6] << 8U) |
            (uint64_t)unsafeData[s + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
    }

    /**
     * Read an object supporting the marshal/unmarshal interface
     *
     * If the return value is negative the object's state is undefined. A return value of
     * zero typically also indicates a problem, though this may depend on the object type.
     *
     * Since objects may be invalid even if there is no overflow, it's important to check
     * the return value of this function in all cases and discard invalid packets as it
     * indicates.
     *
     * @tparam T Object type
     * @param ii Index value-result parameter (incremented by object's size in bytes)
     * @param obj Object to read
     * @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
     */
    template<typename T>
    ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const noexcept
    {
        if (ii < ZT_BUF_MEM_SIZE) {
            int ms = obj.unmarshal(unsafeData + ii,ZT_BUF_MEM_SIZE - ii);
            if (ms > 0)
                ii += ms;
            return ms;
        }
        return -1;
    }
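    // A minimal sketch of reading a marshalable object (illustrative only; "Identity"
    // stands in for any type exposing unmarshal(), and startOfIdentity/packetSize are
    // hypothetical): check both the return value and readOverflow() before trusting it.
    //
    //   Identity id;
    //   int io = startOfIdentity;
    //   if ((buf.rO(io, id) <= 0) || Buf::readOverflow(io, packetSize))
    //     return false; // unmarshal failed or ran past the actual data, discard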
    /**
     * Read a C-style string from the buffer, making a copy and advancing the iterator
     *
     * Use this if the buffer's memory may get changed between reading and processing
     * what is read.
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @param buf Buffer to receive string
     * @param bufSize Capacity of buffer in bytes
     * @return Pointer to buf or NULL on overflow or error
     */
    ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const noexcept
    {
        const char *const s = (const char *)(unsafeData + ii);
        const int sii = ii;
        while (ii < ZT_BUF_MEM_SIZE) {
            if (unsafeData[ii++] == 0) {
                const unsigned int l = (unsigned int)(ii - sii);
                if (l > bufSize) // string (including terminating null) does not fit in destination
                    return nullptr;
                memcpy(buf,s,l);
                return buf;
            }
        }
        return nullptr;
    }
    /**
     * Obtain a pointer to a C-style string in the buffer without copying and advance the iterator
     *
     * The iterator is advanced even if this fails and returns NULL so that readOverflow()
     * will indicate that an overflow occurred. As with other reads the string's contents are
     * undefined if readOverflow() returns true.
     *
     * This version avoids a copy and so is faster if the buffer won't be modified between
     * reading and processing.
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @return Pointer to null-terminated C-style string or NULL on overflow or error
     */
    ZT_ALWAYS_INLINE const char *rSnc(int &ii) const noexcept
    {
        const char *const s = (const char *)(unsafeData + ii);
        while (ii < ZT_BUF_MEM_SIZE) {
            if (unsafeData[ii++] == 0)
                return s;
        }
        return nullptr;
    }

    /**
     * Read a byte array from the buffer, making a copy and advancing the iterator
     *
     * Use this if the buffer's memory may get changed between reading and processing
     * what is read.
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param bytes Buffer to contain data to read
     * @param len Length of buffer
     * @return Pointer to data or NULL on overflow or error
     */
    ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *const bytes,const unsigned int len) const noexcept
    {
        const uint8_t *const b = unsafeData + ii; // capture source position before advancing the iterator
        if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
            memcpy(bytes,b,len);
            return reinterpret_cast<uint8_t *>(bytes);
        }
        return nullptr;
    }

    /**
     * Obtain a pointer to a field in the buffer without copying and advance the iterator
     *
     * The iterator is advanced even if this fails and returns NULL so that readOverflow()
     * will indicate that an overflow occurred.
     *
     * This version avoids a copy and so is faster if the buffer won't be modified between
     * reading and processing.
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param len Length of data field to obtain a pointer to
     * @return Pointer to field or NULL on overflow
     */
    ZT_ALWAYS_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const noexcept
    {
        const uint8_t *const b = unsafeData + ii;
        return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
    }
    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to soft bounds masking like the rI??() methods.
     */
    ZT_ALWAYS_INLINE uint8_t lI8(const int ii) const noexcept
    {
        return unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK];
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to soft bounds masking like the rI??() methods.
     */
    ZT_ALWAYS_INLINE uint16_t lI16(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[s] << 8U) |
            (uint16_t)unsafeData[s + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
#endif
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to soft bounds masking like the rI??() methods.
     */
    ZT_ALWAYS_INLINE uint32_t lI32(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[s] << 24U) |
            ((uint32_t)unsafeData[s + 1] << 16U) |
            ((uint32_t)unsafeData[s + 2] << 8U) |
            (uint32_t)unsafeData[s + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
#endif
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to soft bounds masking like the rI??() methods.
     */
    ZT_ALWAYS_INLINE uint64_t lI64(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[s] << 56U) |
            ((uint64_t)unsafeData[s + 1] << 48U) |
            ((uint64_t)unsafeData[s + 2] << 40U) |
            ((uint64_t)unsafeData[s + 3] << 32U) |
            ((uint64_t)unsafeData[s + 4] << 24U) |
            ((uint64_t)unsafeData[s + 5] << 16U) |
            ((uint64_t)unsafeData[s + 6] << 8U) |
            (uint64_t)unsafeData[s + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
    }
    /**
     * Write a byte
     *
     * @param ii Index value-result parameter (incremented by 1)
     * @param n Byte
     */
    ZT_ALWAYS_INLINE void wI8(int &ii,const uint8_t n) noexcept
    {
        const int s = ii++;
        unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
    }

    /**
     * Write a 16-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 2)
     * @param n Integer
     */
    ZT_ALWAYS_INLINE void wI16(int &ii,const uint16_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 8U);
        unsafeData[s + 1] = (uint8_t)n;
#else
        *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write a 32-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 4)
     * @param n Integer
     */
    ZT_ALWAYS_INLINE void wI32(int &ii,const uint32_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 24U);
        unsafeData[s + 1] = (uint8_t)(n >> 16U);
        unsafeData[s + 2] = (uint8_t)(n >> 8U);
        unsafeData[s + 3] = (uint8_t)n;
#else
        *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write a 64-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 8)
     * @param n Integer
     */
    ZT_ALWAYS_INLINE void wI64(int &ii,const uint64_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 56U);
        unsafeData[s + 1] = (uint8_t)(n >> 48U);
        unsafeData[s + 2] = (uint8_t)(n >> 40U);
        unsafeData[s + 3] = (uint8_t)(n >> 32U);
        unsafeData[s + 4] = (uint8_t)(n >> 24U);
        unsafeData[s + 5] = (uint8_t)(n >> 16U);
        unsafeData[s + 6] = (uint8_t)(n >> 8U);
        unsafeData[s + 7] = (uint8_t)n;
#else
        *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }
    /**
     * Write an object implementing the marshal interface
     *
     * @tparam T Object type
     * @param ii Index value-result parameter (incremented by size of object)
     * @param t Object to write
     */
    template<typename T>
    ZT_ALWAYS_INLINE void wO(int &ii,T &t) noexcept
    {
        const int s = ii;
        if ((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE) {
            int ms = t.marshal(unsafeData + s);
            if (ms > 0)
                ii += ms;
        } else {
            ii += T::marshalSizeMax(); // mark as overflowed even if we didn't do anything
        }
    }

    /**
     * Write a C-style null-terminated string (including the trailing zero)
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @param s String to write (writes an empty string if this is NULL)
     */
    ZT_ALWAYS_INLINE void wS(int &ii,const char *s) noexcept
    {
        if (s) {
            char c;
            do {
                c = *(s++);
                wI8(ii,(uint8_t)c);
            } while (c);
        } else {
            wI8(ii,0);
        }
    }

    /**
     * Write a byte array
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param bytes Bytes to write
     * @param len Size of data in bytes
     */
    ZT_ALWAYS_INLINE void wB(int &ii,const void *const bytes,const unsigned int len) noexcept
    {
        const int s = ii;
        if ((ii += (int)len) <= ZT_BUF_MEM_SIZE)
            memcpy(unsafeData + s,bytes,len);
    }
    /**
     * Store a byte without advancing the index
     */
    ZT_ALWAYS_INLINE void sI8(const int ii,const uint8_t n) noexcept
    {
        unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK] = n;
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_ALWAYS_INLINE void sI16(const int ii,const uint16_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 8U);
        unsafeData[s + 1] = (uint8_t)n;
#else
        *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_ALWAYS_INLINE void sI32(const int ii,const uint32_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 24U);
        unsafeData[s + 1] = (uint8_t)(n >> 16U);
        unsafeData[s + 2] = (uint8_t)(n >> 8U);
        unsafeData[s + 3] = (uint8_t)n;
#else
        *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_ALWAYS_INLINE void sI64(const int ii,const uint64_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 56U);
        unsafeData[s + 1] = (uint8_t)(n >> 48U);
        unsafeData[s + 2] = (uint8_t)(n >> 40U);
        unsafeData[s + 3] = (uint8_t)(n >> 32U);
        unsafeData[s + 4] = (uint8_t)(n >> 24U);
        unsafeData[s + 5] = (uint8_t)(n >> 16U);
        unsafeData[s + 6] = (uint8_t)(n >> 8U);
        unsafeData[s + 7] = (uint8_t)n;
#else
        *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }
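    // A minimal sketch of the store-without-advancing pattern (illustrative only;
    // payloadBytes and payloadLen are hypothetical): write a placeholder for a length
    // field, append a variable-size payload, then patch the length in place.
    //
    //   int io = 0;
    //   const int lengthAt = io;                   // remember where the 16-bit length goes
    //   buf.wI16(io, 0);                           // placeholder
    //   buf.wB(io, payloadBytes, payloadLen);
    //   buf.sI16(lengthAt, (uint16_t)payloadLen);  // patch without moving the iterator
    //   if (Buf::writeOverflow(io))
    //     return false;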
    /**
     * @return Capacity of this buffer (usable size of unsafeData)
     */
    static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; }

    /**
     * Cast data in this buffer to a (usually packed) structure type
     *
     * Warning: this does no bounds checking. It should only be used with packed
     * struct types designed for use in packet decoding such as those in
     * Protocol.hpp, and if 'i' is non-zero the caller must check bounds.
     *
     * @tparam T Structure type to cast the buffer's data to
     * @param i Index of start of structure (default: 0)
     * @return Reference to the buffer's data cast to type T
     */
    template<typename T>
    ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) noexcept { return *reinterpret_cast<T *>(unsafeData + i); }

    /**
     * Cast data in this buffer to a (usually packed) structure type (const)
     *
     * Warning: this does no bounds checking. It should only be used with packed
     * struct types designed for use in packet decoding such as those in
     * Protocol.hpp, and if 'i' is non-zero the caller must check bounds.
     *
     * @tparam T Structure type to cast the buffer's data to
     * @param i Index of start of structure (default: 0)
     * @return Reference to the buffer's data cast to type T
     */
    template<typename T>
    ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const noexcept { return *reinterpret_cast<const T *>(unsafeData + i); }
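    // A minimal sketch of as<T>() (illustrative only; "Protocol::Header" stands in for
    // whatever packed header struct the caller actually uses, and the received size is
    // assumed to have been checked against sizeof(T) beforehand since as<T>() does no
    // bounds checking of its own):
    //
    //   const Protocol::Header &h = buf.as<Protocol::Header>();
    //   // ... read header fields of h directly ...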
    ZT_ALWAYS_INLINE bool operator==(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) == 0); }
    ZT_ALWAYS_INLINE bool operator!=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) != 0); }
    ZT_ALWAYS_INLINE bool operator<(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) < 0); }
    ZT_ALWAYS_INLINE bool operator<=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) <= 0); }
    ZT_ALWAYS_INLINE bool operator>(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) > 0); }
    ZT_ALWAYS_INLINE bool operator>=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) >= 0); }

    /**
     * Raw data held in buffer
     *
     * The additional eight bytes should not be used and should be considered undefined.
     * They exist to allow reads and writes of integer types to silently overflow if a
     * read or write is performed at the end of the buffer.
     */
    uint8_t unsafeData[ZT_BUF_MEM_SIZE + 8];

private:
    // Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise
    std::atomic<uintptr_t> __nextInPool;

    // Reference counter for SharedPtr<>
    std::atomic<int> __refCount;
};

} // namespace ZeroTier

#endif