/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
#ifndef ZT_BUF_HPP
#define ZT_BUF_HPP

#include "Constants.hpp"
#include "Utils.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
#include "TriviallyCopyable.hpp"
#include "FCV.hpp"

#include <cstdint>
#include <cstring>
#include <cstdlib>
#include <stdexcept>
#include <utility>
#include <algorithm>
#include <new>
#include <atomic> // std::atomic is used below for the pool linkage and reference count

// Buffers are 16384 bytes in size because this is the smallest size that can hold any packet
// and is a power of two. It needs to be a power of two because masking is significantly faster
// than integer division modulus.
#define ZT_BUF_MEM_SIZE 0x00004000
#define ZT_BUF_MEM_MASK 0x00003fffU

namespace ZeroTier {

/**
 * Buffer and methods for branch-free bounds-checked data assembly and parsing
 *
 * This implements an extremely fast buffer for packet assembly and parsing that avoids
 * branching whenever possible. To be safe it must be used correctly!
 *
 * The read methods are prefixed by 'r', and write methods with 'w'. All methods take
 * an iterator, which is just an int that should be initialized to 0 (or whatever starting
 * position is desired). All read methods will advance the iterator regardless of outcome.
 *
 * Read and write methods fail silently in the event of overflow. They do not corrupt or
 * access memory outside the bounds of Buf, but will otherwise produce undefined results.
 *
 * IT IS THE RESPONSIBILITY OF THE USER of this class to use the readOverflow() and
 * writeOverflow() static methods to check the iterator for overflow after each series
 * of reads and writes and BEFORE ANY PARSING or other decisions are made on the basis
 * of the data obtained from a buffer. Failure to do so can result in bugs due
 * to parsing and branching on undefined or corrupt data.
 *
 * ^^ THIS IS VERY IMPORTANT ^^
 *
 * A typical packet assembly consists of repeated calls to the write methods followed by
 * a check to writeOverflow() before final packet armoring and transport. A typical packet
 * disassembly and parsing consists of a series of read calls to obtain the packet's
 * fields followed by a call to readOverflow() to check that these fields are valid. The
 * packet is discarded if readOverflow() returns true. Some packet parsers may make
 * additional reads and in this case readOverflow() must be checked after each set of
 * reads to ensure that overflow did not occur.
 *
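 * A sketch of that pattern (illustrative only; the field layout and the 'payload',
 * 'payloadLen' and 'packetSize' names are hypothetical, not part of this API):
 *
 *   Buf b;
 *   int ii = 0;
 *   b.wI8(ii,0x01);                 // hypothetical packet type
 *   b.wI32(ii,0x12345678);          // hypothetical fixed field
 *   b.wB(ii,payload,payloadLen);    // hypothetical variable-length payload
 *   if (Buf::writeOverflow(ii))
 *     return; // discard, the packet did not fit
 *
 *   int ri = 0;
 *   const uint8_t type = b.rI8(ri);
 *   const uint32_t field = b.rI32(ri);
 *   if (Buf::readOverflow(ri,packetSize)) // packetSize: actual number of valid bytes in the buffer
 *     return; // discard, fields are not valid
 *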
 * Buf uses a lock-free pool for extremely fast allocation and deallocation.
 *
 * Buf is a fixed-size byte buffer of ZT_BUF_MEM_SIZE bytes. Packed plain-data structures
 * such as those in Protocol.hpp can be overlaid on its contents via the as<T>() accessors;
 * such a type must be no larger than ZT_BUF_MEM_SIZE.
 */
class Buf
{
    friend class SharedPtr<Buf>;

public:
    // New and delete operators that allocate Buf instances from a shared lock-free memory pool.
    static void *operator new(std::size_t sz);
    static void operator delete(void *ptr);

    /**
     * Free all instances of Buf held in the shared pool.
     *
     * New buffers will be created and the pool repopulated as buffers are allocated,
     * and outstanding buffers will still be returned to the pool when released. This
     * just frees buffers currently held in reserve.
     */
    static void freePool() noexcept;

    /**
     * Slice is almost exactly like the built-in slice data structure in Go
     */
    struct Slice : TriviallyCopyable
    {
        ZT_ALWAYS_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) noexcept : b(b_),s(s_),e(e_) {}
        ZT_ALWAYS_INLINE Slice() noexcept : b(),s(0),e(0) {}

        ZT_ALWAYS_INLINE operator bool() const noexcept { return (b); }
        ZT_ALWAYS_INLINE unsigned int size() const noexcept { return (e - s); }
        ZT_ALWAYS_INLINE void zero() noexcept { b.zero(); s = 0; e = 0; }

        /**
         * Buffer holding slice data
         */
        SharedPtr<Buf> b;

        /**
         * Index of start of data in slice
         */
        unsigned int s;

        /**
         * Index of end of data in slice (make sure it's greater than or equal to 's'!)
         */
        unsigned int e;
    };

    /**
     * Assemble all slices in a vector into a single slice starting at position 0
     *
     * The returned slice will start at 0 and contain the entire vector unless the
     * vector is too large to fit in a single buffer. If that or any other error
     * occurs the returned slice will be empty and contain a NULL Buf.
     *
     * The vector may be modified by this function and should be considered
     * undefined after it is called.
     *
     * @tparam FCVC Capacity of FCV (generally inferred automatically)
     * @param fcv FCV containing one or more slices
     * @return Single slice containing fully assembled buffer (empty on error)
     */
    template<unsigned int FCVC>
    static ZT_ALWAYS_INLINE Buf::Slice assembleSliceVector(FCV<Buf::Slice,FCVC> &fcv) noexcept
    {
        Buf::Slice r;

        typename FCV<Buf::Slice,FCVC>::const_iterator s(fcv.begin());
        unsigned int l = s->e - s->s;
        if (l <= ZT_BUF_MEM_SIZE) {
            r.b.move(s->b);
            if (s->s > 0)
                memmove(r.b->unsafeData,r.b->unsafeData + s->s,l);
            r.e = l;

            while (++s != fcv.end()) {
                l = s->e - s->s;
                if (l > (ZT_BUF_MEM_SIZE - r.e)) {
                    r.b.zero();
                    r.e = 0;
                    break;
                }
                memcpy(r.b->unsafeData + r.e,s->b->unsafeData + s->s,l);
                s->b.zero(); // let go of buffer in vector as soon as possible
                r.e += l;
            }
        }
        return r;
    }

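    // Example of assembling received fragments (a sketch; the FCV capacity of 32 and the way
    // the slices get populated are hypothetical, not prescribed by this class):
    //
    //   FCV<Buf::Slice,32> fragments;
    //   // ... push one Slice per received fragment, in order ...
    //   Buf::Slice whole = Buf::assembleSliceVector(fragments);
    //   if (!whole)
    //     return; // too large or otherwise invalid
    //   // whole.b now holds the assembled data in the range [0, whole.e)
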
    /**
     * Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
     */
    ZT_ALWAYS_INLINE Buf() noexcept {}

    ZT_ALWAYS_INLINE Buf(const Buf &b2) noexcept { memcpy(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE); }

    ZT_ALWAYS_INLINE Buf &operator=(const Buf &b2) noexcept
    {
        if (this != &b2)
            memcpy(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE);
        return *this;
    }

    /**
     * Check for overflow beyond the capacity of the buffer
     *
     * This is used to check for overflow when writing. It returns true if the iterator
     * has passed beyond the capacity of the buffer.
     *
     * @param ii Iterator to check
     * @return True if iterator has written past the capacity of the buffer
     */
    static ZT_ALWAYS_INLINE bool writeOverflow(const int &ii) noexcept { return ((ii - ZT_BUF_MEM_SIZE) > 0); }

    /**
     * Check for overflow beyond the size of the data that should be in the buffer
     *
     * This is used to check for overflow when reading, with the second argument being the
     * size of the meaningful data actually present in the buffer.
     *
     * @param ii Iterator to check
     * @param size Size of data that should be in buffer
     * @return True if iterator has read past the size of the data
     */
    static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size) noexcept { return ((ii - (int)size) > 0); }

    /**
     * Set all memory to zero
     */
    ZT_ALWAYS_INLINE void clear() noexcept { memset(unsafeData,0,ZT_BUF_MEM_SIZE); }

    /**
     * Zero security critical data using Utils::burn() to ensure it's never optimized out.
     */
    ZT_ALWAYS_INLINE void burn() noexcept { Utils::burn(unsafeData,ZT_BUF_MEM_SIZE); }

    /**
     * Read a byte
     *
     * @param ii Index value-result parameter (incremented by 1)
     * @return Byte (undefined on overflow)
     */
    ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const noexcept
    {
        const int s = ii++;
        return unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK];
    }

    /**
     * Read a 16-bit integer
     *
     * @param ii Index value-result parameter (incremented by 2)
     * @return Integer (undefined on overflow)
     */
    ZT_ALWAYS_INLINE uint16_t rI16(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[s] << 8U) |
            (uint16_t)unsafeData[s + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
#endif
    }

    /**
     * Read a 32-bit integer
     *
     * @param ii Index value-result parameter (incremented by 4)
     * @return Integer (undefined on overflow)
     */
    ZT_ALWAYS_INLINE uint32_t rI32(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[s] << 24U) |
            ((uint32_t)unsafeData[s + 1] << 16U) |
            ((uint32_t)unsafeData[s + 2] << 8U) |
            (uint32_t)unsafeData[s + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
#endif
    }

    /**
     * Read a 64-bit integer
     *
     * @param ii Index value-result parameter (incremented by 8)
     * @return Integer (undefined on overflow)
     */
    ZT_ALWAYS_INLINE uint64_t rI64(int &ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
        ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[s] << 56U) |
            ((uint64_t)unsafeData[s + 1] << 48U) |
            ((uint64_t)unsafeData[s + 2] << 40U) |
            ((uint64_t)unsafeData[s + 3] << 32U) |
            ((uint64_t)unsafeData[s + 4] << 24U) |
            ((uint64_t)unsafeData[s + 5] << 16U) |
            ((uint64_t)unsafeData[s + 6] << 8U) |
            (uint64_t)unsafeData[s + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
    }

    /**
     * Read an object supporting the marshal/unmarshal interface
     *
     * If the return value is negative the object's state is undefined. A return value of
     * zero typically also indicates a problem, though this may depend on the object type.
     *
     * Since objects may be invalid even if there is no overflow, it's important to check
     * the return value of this function in all cases and discard invalid packets as it
     * indicates.
     *
     * @tparam T Object type
     * @param ii Index value-result parameter (incremented by object's size in bytes)
     * @param obj Object to read
     * @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
     */
    template<typename T>
    ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const noexcept
    {
        if (ii < ZT_BUF_MEM_SIZE) {
            int ms = obj.unmarshal(unsafeData + ii,ZT_BUF_MEM_SIZE - ii);
            if (ms > 0)
                ii += ms;
            return ms;
        }
        return -1;
    }

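    // Example of reading a marshaled object (a sketch; 'InetAddress' is used purely as an
    // illustration of some type implementing the unmarshal interface):
    //
    //   InetAddress a;
    //   if (b.rO(ii,a) <= 0)
    //     return; // unmarshal error or overflow, discard the packet
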
    /**
     * Read a C-style string from the buffer, making a copy and advancing the iterator
     *
     * Use this if the buffer's memory may get changed between reading and processing
     * what is read.
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @param buf Buffer to receive string
     * @param bufSize Capacity of buffer in bytes
     * @return Pointer to buf or NULL on overflow or error
     */
    ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const noexcept
    {
        const char *const s = (const char *)(unsafeData + ii);
        const int sii = ii;
        while (ii < ZT_BUF_MEM_SIZE) {
            if (unsafeData[ii++] == 0) {
                if ((unsigned int)(ii - sii) > bufSize)
                    return nullptr; // string (including terminator) does not fit in buf
                memcpy(buf,s,ii - sii);
                return buf;
            }
        }
        return nullptr;
    }

    /**
     * Obtain a pointer to a C-style string in the buffer without copying and advance the iterator
     *
     * The iterator is advanced even if this fails and returns NULL so that readOverflow()
     * will indicate that an overflow occurred. As with other reads the string's contents are
     * undefined if readOverflow() returns true.
     *
     * This version avoids a copy and so is faster if the buffer won't be modified between
     * reading and processing.
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @return Pointer to null-terminated C-style string or NULL on overflow or error
     */
    ZT_ALWAYS_INLINE const char *rSnc(int &ii) const noexcept
    {
        const char *const s = (const char *)(unsafeData + ii);
        while (ii < ZT_BUF_MEM_SIZE) {
            if (unsafeData[ii++] == 0)
                return s;
        }
        return nullptr;
    }

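    // Example of reading a string field (a sketch; 'name', its size, and 'packetSize' are
    // hypothetical names chosen for illustration):
    //
    //   char name[64];
    //   if (!b.rS(ii,name,sizeof(name)))
    //     return; // overflow or string too long for 'name'
    //
    // Or, zero-copy if the buffer will not be modified before the string is used:
    //
    //   const char *namePtr = b.rSnc(ii);
    //   if ((!namePtr)||(Buf::readOverflow(ii,packetSize)))
    //     return;
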
    /**
     * Read a byte array from the buffer, making a copy and advancing the iterator
     *
     * Use this if the buffer's memory may get changed between reading and processing
     * what is read.
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param bytes Buffer to contain data to read
     * @param len Length of buffer
     * @return Pointer to data or NULL on overflow or error
     */
    ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *const bytes,const unsigned int len) const noexcept
    {
        const int s = ii;
        if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
            memcpy(bytes,unsafeData + s,len);
            return reinterpret_cast<uint8_t *>(bytes);
        }
        return nullptr;
    }

    /**
     * Obtain a pointer to a field in the buffer without copying and advance the iterator
     *
     * The iterator is advanced even if this fails and returns NULL so that readOverflow()
     * will indicate that an overflow occurred.
     *
     * This version avoids a copy and so is faster if the buffer won't be modified between
     * reading and processing.
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param len Length of data field to obtain a pointer to
     * @return Pointer to field or NULL on overflow
     */
    ZT_ALWAYS_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const noexcept
    {
        const uint8_t *const b = unsafeData + ii;
        return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to the same soft bounds masking as the rI??() methods.
     */
    ZT_ALWAYS_INLINE uint8_t lI8(const int ii) const noexcept
    {
        return unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK];
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to the same soft bounds masking as the rI??() methods.
     */
    ZT_ALWAYS_INLINE uint16_t lI16(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint16_t)unsafeData[s] << 8U) |
            (uint16_t)unsafeData[s + 1]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
#endif
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to the same soft bounds masking as the rI??() methods.
     */
    ZT_ALWAYS_INLINE uint32_t lI32(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint32_t)unsafeData[s] << 24U) |
            ((uint32_t)unsafeData[s + 1] << 16U) |
            ((uint32_t)unsafeData[s + 2] << 8U) |
            (uint32_t)unsafeData[s + 3]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
#endif
    }

    /**
     * Load a value at an index without advancing the index
     *
     * Note that unlike the rI??() methods this does not increment ii and therefore
     * will not necessarily result in a 'true' return from readOverflow(). It does
     * however subject 'ii' to the same soft bounds masking as the rI??() methods.
     */
    ZT_ALWAYS_INLINE uint64_t lI64(const int ii) const noexcept
    {
        const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        return (
            ((uint64_t)unsafeData[s] << 56U) |
            ((uint64_t)unsafeData[s + 1] << 48U) |
            ((uint64_t)unsafeData[s + 2] << 40U) |
            ((uint64_t)unsafeData[s + 3] << 32U) |
            ((uint64_t)unsafeData[s + 4] << 24U) |
            ((uint64_t)unsafeData[s + 5] << 16U) |
            ((uint64_t)unsafeData[s + 6] << 8U) |
            (uint64_t)unsafeData[s + 7]);
#else
        return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
    }

    /**
     * Write a byte
     *
     * @param ii Index value-result parameter (incremented by 1)
     * @param n Byte
     */
    ZT_ALWAYS_INLINE void wI8(int &ii,const uint8_t n) noexcept
    {
        const int s = ii++;
        unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
    }

    /**
     * Write a 16-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 2)
     * @param n Integer
     */
    ZT_ALWAYS_INLINE void wI16(int &ii,const uint16_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 8U);
        unsafeData[s + 1] = (uint8_t)n;
#else
        *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write a 32-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 4)
     * @param n Integer
     */
    ZT_ALWAYS_INLINE void wI32(int &ii,const uint32_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 24U);
        unsafeData[s + 1] = (uint8_t)(n >> 16U);
        unsafeData[s + 2] = (uint8_t)(n >> 8U);
        unsafeData[s + 3] = (uint8_t)n;
#else
        *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write a 64-bit integer in big-endian byte order
     *
     * @param ii Index value-result parameter (incremented by 8)
     * @param n Integer
     */
    ZT_ALWAYS_INLINE void wI64(int &ii,const uint64_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
        ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 56U);
        unsafeData[s + 1] = (uint8_t)(n >> 48U);
        unsafeData[s + 2] = (uint8_t)(n >> 40U);
        unsafeData[s + 3] = (uint8_t)(n >> 32U);
        unsafeData[s + 4] = (uint8_t)(n >> 24U);
        unsafeData[s + 5] = (uint8_t)(n >> 16U);
        unsafeData[s + 6] = (uint8_t)(n >> 8U);
        unsafeData[s + 7] = (uint8_t)n;
#else
        *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Write an object implementing the marshal interface
     *
     * @tparam T Object type
     * @param ii Index value-result parameter (incremented by size of object)
     * @param t Object to write
     */
    template<typename T>
    ZT_ALWAYS_INLINE void wO(int &ii,T &t) noexcept
    {
        const int s = ii;
        if ((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE) {
            int ms = t.marshal(unsafeData + s);
            if (ms > 0)
                ii += ms;
        } else {
            ii += T::marshalSizeMax(); // mark as overflowed even if we didn't do anything
        }
    }

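    // Example of writing a marshalable object (a sketch; 'addr' of type InetAddress is an
    // illustrative stand-in for any type implementing marshal()/marshalSizeMax()):
    //
    //   b.wO(ii,addr);
    //   if (Buf::writeOverflow(ii))
    //     return; // object did not fit
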
    /**
     * Write a C-style null-terminated string (including the trailing zero)
     *
     * @param ii Index value-result parameter (incremented by length of string)
     * @param s String to write (writes an empty string if this is NULL)
     */
    ZT_ALWAYS_INLINE void wS(int &ii,const char *s) noexcept
    {
        if (s) {
            char c;
            do {
                c = *(s++);
                wI8(ii,(uint8_t)c);
            } while (c);
        } else {
            wI8(ii,0);
        }
    }

    /**
     * Write a byte array
     *
     * @param ii Index value-result parameter (incremented by len)
     * @param bytes Bytes to write
     * @param len Size of data in bytes
     */
    ZT_ALWAYS_INLINE void wB(int &ii,const void *const bytes,const unsigned int len) noexcept
    {
        const int s = ii;
        if ((ii += (int)len) <= ZT_BUF_MEM_SIZE)
            memcpy(unsafeData + s,bytes,len);
    }

    /**
     * Store a byte without advancing the index
     */
    ZT_ALWAYS_INLINE void sI8(const int ii,const uint8_t n) noexcept
    {
        unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK] = n;
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_ALWAYS_INLINE void sI16(const int ii,const uint16_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 8U);
        unsafeData[s + 1] = (uint8_t)n;
#else
        *reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_ALWAYS_INLINE void sI32(const int ii,const uint32_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 24U);
        unsafeData[s + 1] = (uint8_t)(n >> 16U);
        unsafeData[s + 2] = (uint8_t)(n >> 8U);
        unsafeData[s + 3] = (uint8_t)n;
#else
        *reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

    /**
     * Store an integer without advancing the index
     */
    ZT_ALWAYS_INLINE void sI64(const int ii,const uint64_t n) noexcept
    {
        const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
        unsafeData[s] = (uint8_t)(n >> 56U);
        unsafeData[s + 1] = (uint8_t)(n >> 48U);
        unsafeData[s + 2] = (uint8_t)(n >> 40U);
        unsafeData[s + 3] = (uint8_t)(n >> 32U);
        unsafeData[s + 4] = (uint8_t)(n >> 24U);
        unsafeData[s + 5] = (uint8_t)(n >> 16U);
        unsafeData[s + 6] = (uint8_t)(n >> 8U);
        unsafeData[s + 7] = (uint8_t)n;
#else
        *reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
#endif
    }

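    // Example of a typical use of the store methods: reserve space for a 16-bit length field,
    // write the variable-length data, then patch the length in afterward (a sketch; the field
    // layout and the 'payload'/'payloadLen' names are hypothetical):
    //
    //   const int lenAt = ii;
    //   b.wI16(ii,0);                 // placeholder
    //   b.wB(ii,payload,payloadLen);  // hypothetical payload
    //   b.sI16(lenAt,(uint16_t)payloadLen);
    //   if (Buf::writeOverflow(ii))
    //     return;
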
    /**
     * @return Capacity of this buffer (usable size of unsafeData)
     */
    static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; }

    /**
     * Cast data in this buffer to a (usually packed) structure type
     *
     * Warning: this does no bounds checking. It should only be used with packed
     * struct types designed for use in packet decoding such as those in
     * Protocol.hpp, and if 'i' is non-zero the caller must check bounds.
     *
     * @tparam T Structure type to cast the buffer's data to
     * @param i Index of start of structure (default: 0)
     * @return Reference to the buffer's data cast to type T
     */
    template<typename T>
    ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) noexcept { return *reinterpret_cast<T *>(unsafeData + i); }

    /**
     * Cast data in this buffer to a (usually packed) structure type (const)
     *
     * Warning: this does no bounds checking. It should only be used with packed
     * struct types designed for use in packet decoding such as those in
     * Protocol.hpp, and if 'i' is non-zero the caller must check bounds.
     *
     * @tparam T Structure type to cast the buffer's data to
     * @param i Index of start of structure (default: 0)
     * @return Reference to the buffer's data cast to type T
     */
    template<typename T>
    ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const noexcept { return *reinterpret_cast<const T *>(unsafeData + i); }

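    // Example of overlaying a packed structure on the buffer (a sketch; 'ExampleHeader' is a
    // hypothetical struct, not one of the real Protocol.hpp types):
    //
    //   struct ExampleHeader { uint8_t type; uint8_t flags; }; // assumed tightly packed
    //   const ExampleHeader &h = b.as<ExampleHeader>();
    //   // the caller must have verified that at least sizeof(ExampleHeader) valid bytes are present
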
    ZT_ALWAYS_INLINE bool operator==(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) == 0); }
    ZT_ALWAYS_INLINE bool operator!=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) != 0); }
    ZT_ALWAYS_INLINE bool operator<(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) < 0); }
    ZT_ALWAYS_INLINE bool operator<=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) <= 0); }
    ZT_ALWAYS_INLINE bool operator>(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) > 0); }
    ZT_ALWAYS_INLINE bool operator>=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) >= 0); }

    /**
     * Raw data held in buffer
     *
     * The additional eight bytes should not be used and should be considered undefined.
     * They exist to allow reads and writes of integer types to silently overflow if a
     * read or write is performed at the end of the buffer.
     */
    uint8_t unsafeData[ZT_BUF_MEM_SIZE + 8];

private:
    // Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise
    std::atomic<uintptr_t> __nextInPool;

    // Reference counter for SharedPtr<>
    std::atomic<int> __refCount;
};

} // namespace ZeroTier

#endif // ZT_BUF_HPP