
Yet more major and very nit-picky refactoring for performance, etc. Also use std::atomic<> now, with a TODO to implement a shim if we ever do need to build this on a pre-C++11 compiler.

Adam Ierymenko committed 5 years ago
commit f21ecb3762
64 changed files with 1672 additions and 1099 deletions
  1. include/ZeroTierCore.h (+13 -3)
  2. node/AES.cpp (+0 -4)
  3. node/AES.hpp (+18 -24)
  4. node/Address.hpp (+34 -39)
  5. node/AtomicCounter.hpp (+0 -80)
  6. node/Buf.cpp (+29 -65)
  7. node/Buf.hpp (+57 -73)
  8. node/CMakeLists.txt (+0 -1)
  9. node/CertificateOfMembership.cpp (+38 -2)
  10. node/CertificateOfMembership.hpp (+14 -48)
  11. node/CertificateOfOwnership.cpp (+2 -2)
  12. node/CertificateOfOwnership.hpp (+22 -22)
  13. node/Constants.hpp (+4 -4)
  14. node/Credential.cpp (+3 -3)
  15. node/Defragmenter.hpp (+19 -35)
  16. node/Dictionary.hpp (+2 -2)
  17. node/Endpoint.cpp (+2 -2)
  18. node/Endpoint.hpp (+13 -13)
  19. node/FCV.hpp (+14 -14)
  20. node/Hashtable.hpp (+13 -13)
  21. node/Identity.cpp (+2 -2)
  22. node/Identity.hpp (+16 -16)
  23. node/InetAddress.cpp (+2 -2)
  24. node/InetAddress.hpp (+3 -2)
  25. node/MAC.hpp (+26 -27)
  26. node/Membership.hpp (+1 -1)
  27. node/Meter.hpp (+4 -5)
  28. node/MulticastGroup.hpp (+11 -11)
  29. node/Mutex.hpp (+49 -28)
  30. node/Network.cpp (+7 -7)
  31. node/Network.hpp (+13 -14)
  32. node/NetworkConfig.cpp (+9 -2)
  33. node/NetworkConfig.hpp (+12 -10)
  34. node/Node.cpp (+46 -16)
  35. node/Node.hpp (+47 -15)
  36. node/OS.hpp (+3 -0)
  37. node/Path.cpp (+2 -2)
  38. node/Path.hpp (+12 -13)
  39. node/Peer.cpp (+201 -79)
  40. node/Peer.hpp (+104 -54)
  41. node/Poly1305.cpp (+3 -2)
  42. node/Poly1305.hpp (+1 -1)
  43. node/Protocol.cpp (+13 -3)
  44. node/Protocol.hpp (+58 -6)
  45. node/Revocation.cpp (+3 -3)
  46. node/Revocation.hpp (+18 -18)
  47. node/RuntimeEnvironment.hpp (+0 -2)
  48. node/ScopedPtr.hpp (+14 -14)
  49. node/SharedPtr.hpp (+38 -22)
  50. node/Tag.cpp (+3 -3)
  51. node/Tag.hpp (+28 -28)
  52. node/Topology.cpp (+29 -17)
  53. node/Topology.hpp (+33 -19)
  54. node/Trace.cpp (+30 -0)
  55. node/Trace.hpp (+32 -5)
  56. node/TriviallyCopyable.hpp (+10 -10)
  57. node/Utils.cpp (+16 -16)
  58. node/Utils.hpp (+61 -48)
  59. node/VL1.cpp (+383 -95)
  60. node/VL1.hpp (+9 -9)
  61. node/VL2.cpp (+10 -10)
  62. node/VL2.hpp (+9 -9)
  63. osdep/ManagedRoute.hpp (+2 -2)
  64. root/root.cpp (+2 -2)
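The commit message's note about std::atomic<> refers to dropping the old GCC-intrinsic AtomicCounter shim (deleted below in node/AtomicCounter.hpp) and using std::atomic directly, with a TODO to reintroduce a shim only if a pre-C++11 compiler ever has to be supported. The following is a minimal sketch of what such a shim could look like; the name RefCount and the explicit memory orderings are assumptions for illustration, not code from this commit.

#include <atomic>

namespace ZeroTier {

// Sketch of a possible future shim: on any C++11 toolchain this is just a thin
// wrapper around std::atomic<int>; a pre-C++11 build could swap in compiler
// intrinsics behind the same three-method interface.
class RefCount
{
public:
	RefCount() noexcept : _v(0) {}
	int load() const noexcept { return _v.load(std::memory_order_acquire); }
	int operator++() noexcept { return _v.fetch_add(1,std::memory_order_acq_rel) + 1; } // returns new value, like prefix ++
	int operator--() noexcept { return _v.fetch_sub(1,std::memory_order_acq_rel) - 1; }
private:
	std::atomic<int> _v;
};

} // namespace ZeroTier

Because the wrapper adds nothing on a modern compiler, the commit can delete the old class outright and use std::atomic<> members directly (as the Buf and SharedPtr changes below do).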

+ 13 - 3
include/ZeroTierCore.h

@@ -1352,7 +1352,7 @@ typedef struct
 	const ZT_Identity *identity;
 
 	/**
-	 * Hash of identity public key(s)
+	 * SHA384 hash of identity public key(s)
 	 */
 	uint8_t identityHash[48];
 
@@ -1390,15 +1390,25 @@ typedef struct
 	 */
 	struct sockaddr_storage bootstrap;
 
+	/**
+	 * Number of networks in which this peer is authenticated
+	 */
+	unsigned int networkCount;
+
+	/**
+	 * Network IDs for networks (array size: networkCount)
+	 */
+	uint64_t *networks;
+
 	/**
 	 * Number of paths (size of paths[])
 	 */
 	unsigned int pathCount;
 
 	/**
-	 * Known network paths to peer
+	 * Known network paths to peer (array size: pathCount)
 	 */
-	ZT_PeerPhysicalPath paths[ZT_MAX_PEER_NETWORK_PATHS];
+	ZT_PeerPhysicalPath *paths;
 } ZT_Peer;
 
 /**
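For API consumers, the ZT_Peer change above replaces the fixed-size paths[] array with a pointer sized by pathCount and adds a networks/networkCount pair. A hedged usage sketch follows; the function and variable names are hypothetical, but the fields are exactly as declared in the diff.

#include <stdio.h>
#include "ZeroTierCore.h"

/* Illustrative only: walk the new pointer-plus-count fields of a ZT_Peer. */
static void printPeer(const ZT_Peer *peer)
{
	for (unsigned int i = 0; i < peer->networkCount; ++i)
		printf("authenticated on network %.16llx\n", (unsigned long long)peer->networks[i]);
	for (unsigned int i = 0; i < peer->pathCount; ++i)
		printf("path %u of %u known\n", i + 1, peer->pathCount);
}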

+ 0 - 4
node/AES.cpp

File diff suppressed because it is too large

+ 18 - 24
node/AES.hpp

@@ -36,14 +36,14 @@ namespace ZeroTier {
 class AES
 {
 public:
-	ZT_ALWAYS_INLINE AES() {}
-	explicit ZT_ALWAYS_INLINE AES(const uint8_t key[32]) { this->init(key); }
+	ZT_ALWAYS_INLINE AES() noexcept {}
+	explicit ZT_ALWAYS_INLINE AES(const uint8_t key[32]) noexcept { this->init(key); }
 	ZT_ALWAYS_INLINE ~AES() { Utils::burn(&_k,sizeof(_k)); }
 
 	/**
 	 * Set (or re-set) this AES256 cipher's key
 	 */
-	ZT_ALWAYS_INLINE void init(const uint8_t key[32])
+	ZT_ALWAYS_INLINE void init(const uint8_t key[32]) noexcept
 	{
 #ifdef ZT_AES_AESNI
 		if (likely(Utils::CPUID.aes)) {
@@ -60,7 +60,7 @@ public:
 	 * @param in Input block
 	 * @param out Output block (can be same as input)
 	 */
-	ZT_ALWAYS_INLINE void encrypt(const uint8_t in[16],uint8_t out[16]) const
+	ZT_ALWAYS_INLINE void encrypt(const uint8_t in[16],uint8_t out[16]) const noexcept
 	{
 #ifdef ZT_AES_AESNI
 		if (likely(Utils::CPUID.aes)) {
@@ -71,11 +71,6 @@ public:
 		_encryptSW(in,out);
 	}
 
-	ZT_ALWAYS_INLINE void gcm(const uint8_t iv[12],const void *in,const unsigned int len,uint8_t out[16],uint8_t tag[16]) const
-	{
-		// TODO
-	}
-
 private:
 	static const uint32_t Te0[256];
 	static const uint32_t Te1[256];
@@ -83,11 +78,10 @@ private:
 	static const uint32_t Te3[256];
 	static const uint32_t rcon[10];
 
-	void _initSW(const uint8_t key[32]);
-	void _encryptSW(const uint8_t in[16],uint8_t out[16]) const;
-	void _gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const;
+	void _initSW(const uint8_t key[32]) noexcept;
+	void _encryptSW(const uint8_t in[16],uint8_t out[16]) const noexcept;
+	void _gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const noexcept;
 
-	/**************************************************************************/
 	union {
 #ifdef ZT_AES_ARMNEON
 		// ARM NEON key and GMAC parameters
@@ -110,10 +104,9 @@ private:
 			uint32_t ek[60];
 		} sw;
 	} _k;
-	/**************************************************************************/
 
-#ifdef ZT_AES_ARMNEON /******************************************************/
-	static inline void _aes_256_expAssist_armneon(uint32x4_t prev1,uint32x4_t prev2,uint32_t rcon,uint32x4_t *e1,uint32x4_t *e2)
+#ifdef ZT_AES_ARMNEON
+	static inline void _aes_256_expAssist_armneon(uint32x4_t prev1,uint32x4_t prev2,uint32_t rcon,uint32x4_t *e1,uint32x4_t *e2) noexcept
 	{
 		uint32_t round1[4], round2[4], prv1[4], prv2[4];
 		vst1q_u32(prv1, prev1);
@@ -131,7 +124,8 @@ private:
 		//uint32x4_t expansion[2] = {vld1q_u3(round1), vld1q_u3(round2)};
 		//return expansion;
 	}
-	inline void _init_armneon(uint8x16_t encKey)
+
+	inline void _init_armneon(uint8x16_t encKey) noexcept
 	{
 		uint32x4_t *schedule = _k.neon.k;
 		uint32x4_t e1,e2;
@@ -175,7 +169,7 @@ private:
 		*/
 	}
 
-	inline void _encrypt_armneon(uint8x16_t *data) const
+	inline void _encrypt_armneon(uint8x16_t *data) const noexcept
 	{
 		*data = veorq_u8(*data, _k.neon.k[0]);
 		*data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[1]));
@@ -193,12 +187,12 @@ private:
 		*data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[13]));
 		*data = vaeseq_u8(*data, _k.neon.k[14]);
 	}
-#endif /*********************************************************************/
+#endif
 
-#ifdef ZT_AES_AESNI /********************************************************/
-	void _init_aesni(const uint8_t key[32]);
+#ifdef ZT_AES_AESNI
+	void _init_aesni(const uint8_t key[32]) noexcept;
 
-	ZT_ALWAYS_INLINE void _encrypt_aesni(const void *const in,void *const out) const
+	ZT_ALWAYS_INLINE void _encrypt_aesni(const void *const in,void *const out) const noexcept
 	{
 		__m128i tmp;
 		tmp = _mm_loadu_si128((const __m128i *)in);
@@ -219,8 +213,8 @@ private:
 		_mm_storeu_si128((__m128i *)out,_mm_aesenclast_si128(tmp,_k.ni.k[14]));
 	}
 
-	void _gmac_aesni(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const;
-#endif /* ZT_AES_AESNI ******************************************************/
+	void _gmac_aesni(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const noexcept;
+#endif
 };
 
 } // namespace ZeroTier

+ 34 - 39
node/Address.hpp

@@ -14,20 +14,12 @@
 #ifndef ZT_ADDRESS_HPP
 #define ZT_ADDRESS_HPP
 
-#include <cstdio>
-#include <cstdlib>
-#include <cstdint>
-#include <cstring>
-#include <cmath>
-
-#include <string>
-#include <vector>
-#include <algorithm>
-
 #include "Constants.hpp"
 #include "Utils.hpp"
 #include "TriviallyCopyable.hpp"
 
+#define ZT_ADDRESS_STRING_SIZE_MAX 11
+
 namespace ZeroTier {
 
 /**
@@ -36,17 +28,17 @@ namespace ZeroTier {
 class Address : public TriviallyCopyable
 {
 public:
-	ZT_ALWAYS_INLINE Address() : _a(0) {}
-	explicit ZT_ALWAYS_INLINE Address(const uint8_t b[5]) : _a(((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4]) {}
-	explicit ZT_ALWAYS_INLINE Address(const uint64_t a) : _a(a & 0xffffffffffULL) {}
+	ZT_ALWAYS_INLINE Address() noexcept : _a(0) {}
+	explicit ZT_ALWAYS_INLINE Address(const uint8_t b[5]) noexcept : _a(((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4]) {}
+	explicit ZT_ALWAYS_INLINE Address(const uint64_t a) noexcept : _a(a & 0xffffffffffULL) {}
 
-	ZT_ALWAYS_INLINE Address &operator=(const uint64_t a) { _a = (a & 0xffffffffffULL); return *this; }
+	ZT_ALWAYS_INLINE Address &operator=(const uint64_t a) noexcept { _a = (a & 0xffffffffffULL); return *this; }
 
 	/**
 	 * @param bits Raw address -- 5 bytes, big-endian byte order
 	 * @param len Length of array
 	 */
-	ZT_ALWAYS_INLINE void setTo(const uint8_t b[5])
+	ZT_ALWAYS_INLINE void setTo(const uint8_t b[5]) noexcept
 	{
 		_a = ((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4];
 	}
@@ -55,7 +47,7 @@ public:
 	 * @param bits Buffer to hold 5-byte address in big-endian byte order
 	 * @param len Length of array
 	 */
-	ZT_ALWAYS_INLINE void copyTo(uint8_t b[5]) const
+	ZT_ALWAYS_INLINE void copyTo(uint8_t b[5]) const noexcept
 	{
 		b[0] = (uint8_t)(_a >> 32U);
 		b[1] = (uint8_t)(_a >> 24U);
@@ -67,17 +59,22 @@ public:
 	/**
 	 * @return Integer containing address (0 to 2^40)
 	 */
-	ZT_ALWAYS_INLINE uint64_t toInt() const { return _a; }
+	ZT_ALWAYS_INLINE uint64_t toInt() const noexcept { return _a; }
+
+	/**
+	 * Set address to zero/NIL
+	 */
+	ZT_ALWAYS_INLINE void zero() noexcept { _a = 0; }
 
 	/**
 	 * @return Hash code for use with Hashtable
 	 */
-	ZT_ALWAYS_INLINE unsigned long hashCode() const { return (unsigned long)_a; }
+	ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return (unsigned long)_a; }
 
 	/**
 	 * @return Hexadecimal string
 	 */
-	ZT_ALWAYS_INLINE char *toString(char buf[11]) const { return Utils::hex10(_a,buf); }
+	ZT_ALWAYS_INLINE char *toString(char buf[ZT_ADDRESS_STRING_SIZE_MAX]) const noexcept { return Utils::hex10(_a,buf); }
 
 	/**
 	 * Check if this address is reserved
@@ -88,31 +85,29 @@ public:
 	 *
 	 * @return True if address is reserved and may not be used
 	 */
-	ZT_ALWAYS_INLINE bool isReserved() const { return ((!_a)||((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX)); }
+	ZT_ALWAYS_INLINE bool isReserved() const noexcept { return ((!_a)||((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX)); }
 
 	/**
 	 * @param i Value from 0 to 4 (inclusive)
 	 * @return Byte at said position (address interpreted in big-endian order)
 	 */
-	ZT_ALWAYS_INLINE uint8_t operator[](unsigned int i) const { return (uint8_t)(_a >> (32 - (i * 8))); }
-
-	ZT_ALWAYS_INLINE operator bool() const { return (_a != 0); }
-
-	ZT_ALWAYS_INLINE void zero() { _a = 0; }
-
-	ZT_ALWAYS_INLINE bool operator==(const uint64_t &a) const { return (_a == (a & 0xffffffffffULL)); }
-	ZT_ALWAYS_INLINE bool operator!=(const uint64_t &a) const { return (_a != (a & 0xffffffffffULL)); }
-	ZT_ALWAYS_INLINE bool operator>(const uint64_t &a) const { return (_a > (a & 0xffffffffffULL)); }
-	ZT_ALWAYS_INLINE bool operator<(const uint64_t &a) const { return (_a < (a & 0xffffffffffULL)); }
-	ZT_ALWAYS_INLINE bool operator>=(const uint64_t &a) const { return (_a >= (a & 0xffffffffffULL)); }
-	ZT_ALWAYS_INLINE bool operator<=(const uint64_t &a) const { return (_a <= (a & 0xffffffffffULL)); }
-
-	ZT_ALWAYS_INLINE bool operator==(const Address &a) const { return (_a == a._a); }
-	ZT_ALWAYS_INLINE bool operator!=(const Address &a) const { return (_a != a._a); }
-	ZT_ALWAYS_INLINE bool operator>(const Address &a) const { return (_a > a._a); }
-	ZT_ALWAYS_INLINE bool operator<(const Address &a) const { return (_a < a._a); }
-	ZT_ALWAYS_INLINE bool operator>=(const Address &a) const { return (_a >= a._a); }
-	ZT_ALWAYS_INLINE bool operator<=(const Address &a) const { return (_a <= a._a); }
+	ZT_ALWAYS_INLINE uint8_t operator[](unsigned int i) const noexcept { return (uint8_t)(_a >> (32 - (i * 8))); }
+
+	ZT_ALWAYS_INLINE operator bool() const noexcept { return (_a != 0); }
+
+	ZT_ALWAYS_INLINE bool operator==(const uint64_t &a) const noexcept { return (_a == (a & 0xffffffffffULL)); }
+	ZT_ALWAYS_INLINE bool operator!=(const uint64_t &a) const noexcept { return (_a != (a & 0xffffffffffULL)); }
+	ZT_ALWAYS_INLINE bool operator>(const uint64_t &a) const noexcept { return (_a > (a & 0xffffffffffULL)); }
+	ZT_ALWAYS_INLINE bool operator<(const uint64_t &a) const noexcept { return (_a < (a & 0xffffffffffULL)); }
+	ZT_ALWAYS_INLINE bool operator>=(const uint64_t &a) const noexcept { return (_a >= (a & 0xffffffffffULL)); }
+	ZT_ALWAYS_INLINE bool operator<=(const uint64_t &a) const noexcept { return (_a <= (a & 0xffffffffffULL)); }
+
+	ZT_ALWAYS_INLINE bool operator==(const Address &a) const noexcept { return (_a == a._a); }
+	ZT_ALWAYS_INLINE bool operator!=(const Address &a) const noexcept { return (_a != a._a); }
+	ZT_ALWAYS_INLINE bool operator>(const Address &a) const noexcept { return (_a > a._a); }
+	ZT_ALWAYS_INLINE bool operator<(const Address &a) const noexcept { return (_a < a._a); }
+	ZT_ALWAYS_INLINE bool operator>=(const Address &a) const noexcept { return (_a >= a._a); }
+	ZT_ALWAYS_INLINE bool operator<=(const Address &a) const noexcept { return (_a <= a._a); }
 
 #if 0
 	/**
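The Address diff also replaces the magic buffer size 11 with the named constant ZT_ADDRESS_STRING_SIZE_MAX. A small usage sketch, assuming only the Address API as shown above (the helper function name is hypothetical):

#include <cstdio>
#include "Address.hpp"

// Sketch: format a 40-bit ZeroTier address with the new named buffer-size constant.
static void printAddress(const ZeroTier::Address &addr)
{
	char tmp[ZT_ADDRESS_STRING_SIZE_MAX]; // 10 hex digits plus terminating null
	std::printf("%s\n", addr.toString(tmp));
}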

+ 0 - 80
node/AtomicCounter.hpp

@@ -1,80 +0,0 @@
-/*
- * Copyright (c)2013-2020 ZeroTier, Inc.
- *
- * Use of this software is governed by the Business Source License included
- * in the LICENSE.TXT file in the project's root directory.
- *
- * Change Date: 2024-01-01
- *
- * On the date above, in accordance with the Business Source License, use
- * of this software will be governed by version 2.0 of the Apache License.
- */
-/****/
-
-#ifndef ZT_ATOMICCOUNTER_HPP
-#define ZT_ATOMICCOUNTER_HPP
-
-#include "Constants.hpp"
-#include "TriviallyCopyable.hpp"
-
-#ifndef __GNUC__
-#include <intrin.h>
-#endif
-
-namespace ZeroTier {
-
-/**
- * Simple atomic integer used for reference and other counters
- *
- * @tparam T Type of underlying integer (default: int)
- */
-template<typename T = int>
-class AtomicCounter : public TriviallyCopyable
-{
-public:
-	ZT_ALWAYS_INLINE AtomicCounter() : _v(0) {}
-	explicit ZT_ALWAYS_INLINE AtomicCounter(T iv) : _v(iv) {}
-
-	ZT_ALWAYS_INLINE T load() const
-	{
-#ifdef __GNUC__
-		return __sync_or_and_fetch(&_v,0);
-#else
-		return _v.load();
-#endif
-	}
-
-	ZT_ALWAYS_INLINE void zero() { _v = 0; }
-
-	ZT_ALWAYS_INLINE T operator++()
-	{
-#ifdef __GNUC__
-		return __sync_add_and_fetch(&_v,1);
-#else
-		return ++_v;
-#endif
-	}
-
-	ZT_ALWAYS_INLINE T operator--()
-	{
-#ifdef __GNUC__
-		return __sync_sub_and_fetch(&_v,1);
-#else
-		return --_v;
-#endif
-	}
-
-private:
-	ZT_ALWAYS_INLINE AtomicCounter(const AtomicCounter &) {}
-	ZT_ALWAYS_INLINE const AtomicCounter &operator=(const AtomicCounter &) { return *this; }
-
-#ifdef __GNUC__
-	T _v;
-#else
-	typename std::atomic<T> _v;
-#endif
-};
-
-} // namespace ZeroTier
-
-#endif

+ 29 - 65
node/Buf.cpp

@@ -15,92 +15,56 @@
 
 namespace ZeroTier {
 
-#ifdef __GNUC__
-uintptr_t _Buf_pool = 0;
-#else
-std::atomic<uintptr_t> _Buf_pool(0);
-#endif
+static std::atomic<uintptr_t> s_pool(0);
 
-void _Buf_release(void *ptr,std::size_t sz)
-{
-	if (ptr) {
-		uintptr_t bb;
-		const uintptr_t locked = ~((uintptr_t)0);
-		for (;;) {
-#ifdef __GNUC__
-			bb = __sync_fetch_and_or(&_Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
-#else
-			bb = s_pool.fetch_or(locked);
-#endif
-			if (bb != locked)
-				break;
-		}
-
-		((Buf *)ptr)->__nextInPool = bb;
-#ifdef __GNUC__
-		__sync_fetch_and_and(&_Buf_pool,(uintptr_t)ptr);
-#else
-		s_pool.store((uintptr_t)ptr);
-#endif
-	}
-}
-
-void *_Buf_get()
+void *Buf::operator new(std::size_t sz) noexcept
 {
 	uintptr_t bb;
-	const uintptr_t locked = ~((uintptr_t)0);
 	for (;;) {
-#ifdef __GNUC__
-		bb = __sync_fetch_and_or(&_Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
-#else
-		bb = s_pool.fetch_or(locked);
-#endif
-		if (bb != locked)
+		bb = s_pool.exchange(~((uintptr_t)0));
+		if (bb != ~((uintptr_t)0))
 			break;
 	}
 
 	Buf *b;
-	if (bb == 0) {
-#ifdef __GNUC__
-		__sync_fetch_and_and(&_Buf_pool,bb);
-#else
-		s_pool.store(bb);
-#endif
-		b = (Buf *)malloc(sizeof(Buf));
-		if (!b)
-			throw std::bad_alloc();
-	} else {
+	if (bb) {
 		b = (Buf *)bb;
-#ifdef __GNUC__
-		__sync_fetch_and_and(&_Buf_pool,b->__nextInPool);
-#else
 		s_pool.store(b->__nextInPool);
-#endif
+	} else {
+		s_pool.store(0);
+		b = (Buf *)malloc(sz);
+		if (!b)
+			return nullptr;
 	}
 
-	b->__refCount.zero();
+	b->__refCount.store(0);
 	return (void *)b;
 }
 
-void freeBufPool()
+void Buf::operator delete(void *ptr) noexcept
+{
+	if (ptr) {
+		uintptr_t bb;
+		for (;;) {
+			bb = s_pool.exchange(~((uintptr_t)0));
+			if (bb != ~((uintptr_t)0))
+				break;
+		}
+
+		((Buf *)ptr)->__nextInPool = bb;
+		s_pool.store((uintptr_t)ptr);
+	}
+}
+
+void Buf::freePool() noexcept
 {
 	uintptr_t bb;
-	const uintptr_t locked = ~((uintptr_t)0);
 	for (;;) {
-#ifdef __GNUC__
-		bb = __sync_fetch_and_or(&_Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
-#else
-		bb = s_pool.fetch_or(locked);
-#endif
-		if (bb != locked)
+		bb = s_pool.exchange(~((uintptr_t)0));
+		if (bb != ~((uintptr_t)0))
 			break;
 	}
-
-#ifdef __GNUC__
-	__sync_fetch_and_and(&_Buf_pool,(uintptr_t)0);
-#else
 	s_pool.store((uintptr_t)0);
-#endif
 
 	while (bb != 0) {
 		uintptr_t next = ((Buf *)bb)->__nextInPool;
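The rewritten pool above keeps the same technique the old comments describe: the single std::atomic<uintptr_t> holds the head of an intrusive free list, and a thread "locks" it by exchanging in the all-ones sentinel, pops or pushes one node, then publishes the new head. A standalone sketch of that pattern under assumed names (Node, poolHead), not the project's exact code:

#include <atomic>
#include <cstdint>
#include <cstdlib>

struct Node { std::atomic<uintptr_t> next; };

static std::atomic<uintptr_t> poolHead(0);
static const uintptr_t LOCKED = ~((uintptr_t)0); // all 1's sentinel: "list is being updated"

static Node *poolGet()
{
	uintptr_t head;
	do {
		head = poolHead.exchange(LOCKED);          // claim the list head
	} while (head == LOCKED);                      // spin if another thread holds it
	if (head) {
		Node *n = (Node *)head;
		poolHead.store(n->next.load());            // pop: next node becomes the new head
		return n;
	}
	poolHead.store(0);                             // list empty: release, fall back to malloc
	return (Node *)malloc(sizeof(Node));
}

static void poolPut(Node *n)
{
	uintptr_t head;
	do {
		head = poolHead.exchange(LOCKED);
	} while (head == LOCKED);
	n->next.store(head);                           // push: old head becomes our next
	poolHead.store((uintptr_t)n);                  // publish this node as the new head
}

Exchanging a sentinel instead of taking a mutex keeps allocation and release wait-free in the common case, which matters because Buf instances are created and destroyed on the packet hot path.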

+ 57 - 73
node/Buf.hpp

@@ -15,7 +15,6 @@
 #define ZT_BUF_HPP
 
 #include "Constants.hpp"
-#include "AtomicCounter.hpp"
 #include "Utils.hpp"
 #include "SharedPtr.hpp"
 #include "Mutex.hpp"
@@ -28,10 +27,7 @@
 #include <stdexcept>
 #include <utility>
 #include <algorithm>
-
-#ifndef __GNUC__
-#include <atomic>
-#endif
+#include <new>
 
 // Buffers are 16384 bytes in size because this is the smallest size that can hold any packet
 // and is a power of two. It needs to be a power of two because masking is significantly faster
@@ -41,23 +37,6 @@
 
 namespace ZeroTier {
 
-#ifdef __GNUC__
-extern uintptr_t _Buf_pool;
-#else
-extern std::atomic<uintptr_t> _Buf_pool;
-#endif
-void _Buf_release(void *ptr,std::size_t sz);
-void *_Buf_get();
-
-/**
- * Free all instances of Buf in shared pool.
- *
- * New buffers will be created and the pool repopulated if get() is called
- * and outstanding buffers will still be returned to the pool. This just
- * frees buffers currently held in reserve.
- */
-void freeBufPool();
-
 /**
  * Buffer and methods for branch-free bounds-checked data assembly and parsing
  *
@@ -101,24 +80,32 @@ void freeBufPool();
 class Buf
 {
 	friend class SharedPtr< Buf >;
-	friend void _Buf_release(void *,std::size_t);
-	friend void *_Buf_get();
-	friend void freeBufPool();
 
 public:
-	static void operator delete(void *ptr,std::size_t sz) { _Buf_release(ptr,sz); }
+	// New and delete operators that allocate Buf instances from a shared lock-free memory pool.
+	static void *operator new(std::size_t sz) noexcept;
+	static void operator delete(void *ptr) noexcept;
+
+	/**
+	 * Free all instances of Buf in shared pool.
+	 *
+	 * New buffers will be created and the pool repopulated if get() is called
+	 * and outstanding buffers will still be returned to the pool. This just
+	 * frees buffers currently held in reserve.
	 */
+	static void freePool() noexcept;
 
 	/**
 	 * Slice is almost exactly like the built-in slice data structure in Go
 	 */
 	struct Slice : TriviallyCopyable
 	{
-		ZT_ALWAYS_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) : b(b_),s(s_),e(e_) {}
-		ZT_ALWAYS_INLINE Slice() : b(),s(0),e(0) {}
+		ZT_ALWAYS_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) noexcept : b(b_),s(s_),e(e_) {}
+		ZT_ALWAYS_INLINE Slice() noexcept : b(),s(0),e(0) {}
 
-		ZT_ALWAYS_INLINE operator bool() const { return (b); }
-		ZT_ALWAYS_INLINE unsigned int size() const { return (e - s); }
-		ZT_ALWAYS_INLINE void zero() { b.zero(); s = 0; e = 0; }
+		ZT_ALWAYS_INLINE operator bool() const noexcept { return (b); }
+		ZT_ALWAYS_INLINE unsigned int size() const noexcept { return (e - s); }
+		ZT_ALWAYS_INLINE void zero() noexcept { b.zero(); s = 0; e = 0; }
 
 		/**
 		 * Buffer holding slice data
@@ -151,7 +138,7 @@ public:
 	 * @return Single slice containing fully assembled buffer (empty on error)
 	 */
 	template<unsigned int FCVC>
-	static ZT_ALWAYS_INLINE Buf::Slice assembleSliceVector(FCV<Buf::Slice,FCVC> &fcv)
+	static ZT_ALWAYS_INLINE Buf::Slice assembleSliceVector(FCV<Buf::Slice,FCVC> &fcv) noexcept
 	{
 		Buf::Slice r;
 
@@ -179,23 +166,20 @@ public:
 		return r;
 	}
 
-	ZT_ALWAYS_INLINE Buf() {}
-	ZT_ALWAYS_INLINE Buf(const Buf &b2) { memcpy(b,b2.b,ZT_BUF_MEM_SIZE); }
+	/**
+	 * Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
+	 */
+	ZT_ALWAYS_INLINE Buf() noexcept {}
+
+	ZT_ALWAYS_INLINE Buf(const Buf &b2) noexcept { memcpy(b,b2.b,ZT_BUF_MEM_SIZE); }
 
-	ZT_ALWAYS_INLINE Buf &operator=(const Buf &b2)
+	ZT_ALWAYS_INLINE Buf &operator=(const Buf &b2) noexcept
 	{
 		if (this != &b2)
 			memcpy(b,b2.b,ZT_BUF_MEM_SIZE);
 		return *this;
 	}
 
-	/**
-	 * Get obtains a buffer from the pool or allocates a new buffer if the pool is empty
-	 *
-	 * @return Buffer instance
-	 */
-	static ZT_ALWAYS_INLINE SharedPtr< Buf > get() { return SharedPtr<Buf>((Buf *)_Buf_get()); }
-
 	/**
 	 * Check for overflow beyond the size of the buffer
 	 *
@@ -205,7 +189,7 @@ public:
 	 * @param ii Iterator to check
 	 * @return True if iterator has read past the size of the buffer
 	 */
-	static ZT_ALWAYS_INLINE bool writeOverflow(const int &ii) { return ((ii - ZT_BUF_MEM_SIZE) > 0); }
+	static ZT_ALWAYS_INLINE bool writeOverflow(const int &ii) noexcept { return ((ii - ZT_BUF_MEM_SIZE) > 0); }
 
 	/**
 	 * Check for overflow beyond the size of the data that should be in the buffer
@@ -217,17 +201,17 @@ public:
 	 * @param size Size of data that should be in buffer
 	 * @return True if iterator has read past the size of the data
 	 */
-	static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size) { return ((ii - (int)size) > 0); }
+	static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size) noexcept { return ((ii - (int)size) > 0); }
 
 	/**
 	 * Set all memory to zero
 	 */
-	ZT_ALWAYS_INLINE void clear() { memset(b,0,ZT_BUF_MEM_SIZE); }
+	ZT_ALWAYS_INLINE void clear() noexcept { memset(b,0,ZT_BUF_MEM_SIZE); }
 
 	/**
 	 * Zero security critical data using Utils::burn() to ensure it's never optimized out.
 	 */
-	ZT_ALWAYS_INLINE void burn() { Utils::burn(b,ZT_BUF_MEM_SIZE); }
+	ZT_ALWAYS_INLINE void burn() noexcept { Utils::burn(b,ZT_BUF_MEM_SIZE); }
 
 	/**
 	 * Read a byte
@@ -235,7 +219,7 @@ public:
 	 * @param ii Iterator
 	 * @return Byte (undefined on overflow)
 	 */
-	ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const
+	ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const noexcept
 	{
 		const int s = ii++;
 		return b[(unsigned int)s & ZT_BUF_MEM_MASK];
@@ -247,7 +231,7 @@ public:
 	 * @param ii Integer
 	 * @return Integer (undefined on overflow)
 	 */
-	ZT_ALWAYS_INLINE uint16_t rI16(int &ii) const
+	ZT_ALWAYS_INLINE uint16_t rI16(int &ii) const noexcept
 	{
 		const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
 		ii += 2;
@@ -266,7 +250,7 @@ public:
 	 * @param ii Integer
 	 * @return Integer (undefined on overflow)
 	 */
-	ZT_ALWAYS_INLINE uint32_t rI32(int &ii) const
+	ZT_ALWAYS_INLINE uint32_t rI32(int &ii) const noexcept
 	{
 		const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
 		ii += 4;
@@ -287,7 +271,7 @@ public:
 	 * @param ii Integer
 	 * @return Integer (undefined on overflow)
 	 */
-	ZT_ALWAYS_INLINE uint64_t rI64(int &ii) const
+	ZT_ALWAYS_INLINE uint64_t rI64(int &ii) const noexcept
 	{
 		const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
 		ii += 8;
@@ -322,7 +306,7 @@ public:
 	 * @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
 	 */
 	template<typename T>
-	ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const
+	ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const noexcept
 	{
 		if (ii < ZT_BUF_MEM_SIZE) {
 			int ms = obj.unmarshal(b + ii,ZT_BUF_MEM_SIZE - ii);
@@ -344,7 +328,7 @@ public:
 	 * @param bufSize Capacity of buffer in bytes
 	 * @return Pointer to buf or NULL on overflow or error
 	 */
-	ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const
+	ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const noexcept
 	{
 		const char *const s = (const char *)(b + ii);
 		const int sii = ii;
@@ -370,7 +354,7 @@ public:
 	 * @param ii Iterator
 	 * @return Pointer to null-terminated C-style string or NULL on overflow or error
 	 */
-	ZT_ALWAYS_INLINE const char *rSnc(int &ii) const
+	ZT_ALWAYS_INLINE const char *rSnc(int &ii) const noexcept
 	{
 		const char *const s = (const char *)(b + ii);
 		while (ii < ZT_BUF_MEM_SIZE) {
@@ -391,7 +375,7 @@ public:
 	 * @param len Length of buffer
 	 * @return Pointer to data or NULL on overflow or error
 	 */
-	ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *bytes,unsigned int len) const
+	ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *bytes,unsigned int len) const noexcept
 	{
 		if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
 			memcpy(bytes,b + ii,len);
@@ -413,7 +397,7 @@ public:
 	 * @param len Length of data field to obtain a pointer to
 	 * @return Pointer to field or NULL on overflow
 	 */
-	ZT_ALWAYS_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const
+	ZT_ALWAYS_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const noexcept
 	{
 		const uint8_t *const b = b + ii;
 		return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
@@ -425,7 +409,7 @@ public:
 	 * @param ii Iterator
 	 * @param n Byte
 	 */
-	ZT_ALWAYS_INLINE void wI(int &ii,uint8_t n)
+	ZT_ALWAYS_INLINE void wI(int &ii,uint8_t n) noexcept
 	{
 		const int s = ii++;
 		b[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
@@ -437,7 +421,7 @@ public:
 	 * @param ii Iterator
 	 * @param n Integer
 	 */
-	ZT_ALWAYS_INLINE void wI(int &ii,uint16_t n)
+	ZT_ALWAYS_INLINE void wI(int &ii,uint16_t n) noexcept
 	{
 		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
 		ii += 2;
@@ -455,7 +439,7 @@ public:
 	 * @param ii Iterator
 	 * @param n Integer
 	 */
-	ZT_ALWAYS_INLINE void wI(int &ii,uint32_t n)
+	ZT_ALWAYS_INLINE void wI(int &ii,uint32_t n) noexcept
 	{
 		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
 		ii += 4;
@@ -475,7 +459,7 @@ public:
 	 * @param ii Iterator
 	 * @param n Integer
 	 */
-	ZT_ALWAYS_INLINE void wI(int &ii,uint64_t n)
+	ZT_ALWAYS_INLINE void wI(int &ii,uint64_t n) noexcept
 	{
 		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
 		ii += 8;
@@ -501,7 +485,7 @@ public:
 	 * @param t Object to write
 	 */
 	template<typename T>
-	ZT_ALWAYS_INLINE void wO(int &ii,T &t)
+	ZT_ALWAYS_INLINE void wO(int &ii,T &t) noexcept
 	{
 		const int s = ii;
 		if ((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE) {
@@ -519,7 +503,7 @@ public:
 	 * @param ii Iterator
 	 * @param s String to write (writes an empty string if this is NULL)
 	 */
-	ZT_ALWAYS_INLINE void wS(int &ii,const char *s)
+	ZT_ALWAYS_INLINE void wS(int &ii,const char *s) noexcept
 	{
 		if (s) {
 			char c;
@@ -539,7 +523,7 @@ public:
 	 * @param bytes Bytes to write
 	 * @param len Size of data in bytes
 	 */
-	ZT_ALWAYS_INLINE void wB(int &ii,const void *const bytes,const unsigned int len)
+	ZT_ALWAYS_INLINE void wB(int &ii,const void *const bytes,const unsigned int len) noexcept
 	{
 		const int s = ii;
 		if ((ii += (int)len) <= ZT_BUF_MEM_SIZE)
@@ -549,7 +533,7 @@ public:
 	/**
 	 * @return Capacity of this buffer (usable size of data.bytes)
 	 */
-	static constexpr unsigned int capacity() { return ZT_BUF_MEM_SIZE; }
+	static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; }
 
 	/**
 	 * Cast data in 'b' to a (usually packed) structure type
@@ -563,7 +547,7 @@ public:
 	 * @return Reference to 'b' cast to type T
 	 */
 	template<typename T>
-	ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) { return *reinterpret_cast<T *>(b + i); }
+	ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) noexcept { return *reinterpret_cast<T *>(b + i); }
 
 	/**
 	 * Cast data in 'b' to a (usually packed) structure type (const)
@@ -577,14 +561,14 @@ public:
 	 * @return Reference to 'b' cast to type T
 	 */
 	template<typename T>
-	ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const { return *reinterpret_cast<const T *>(b + i); }
+	ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const noexcept { return *reinterpret_cast<const T *>(b + i); }
 
-	ZT_ALWAYS_INLINE bool operator==(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) == 0); }
-	ZT_ALWAYS_INLINE bool operator!=(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) != 0); }
-	ZT_ALWAYS_INLINE bool operator<(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) < 0); }
-	ZT_ALWAYS_INLINE bool operator<=(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) <= 0); }
-	ZT_ALWAYS_INLINE bool operator>(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) > 0); }
-	ZT_ALWAYS_INLINE bool operator>=(const Buf &b2) const { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) >= 0); }
+	ZT_ALWAYS_INLINE bool operator==(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) == 0); }
+	ZT_ALWAYS_INLINE bool operator!=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) != 0); }
+	ZT_ALWAYS_INLINE bool operator<(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) < 0); }
+	ZT_ALWAYS_INLINE bool operator<=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) <= 0); }
+	ZT_ALWAYS_INLINE bool operator>(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) > 0); }
+	ZT_ALWAYS_INLINE bool operator>=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) >= 0); }
 
 	/**
 	 * Raw data held in buffer
@@ -597,10 +581,10 @@ public:
 
 private:
	// Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise
-	volatile uintptr_t __nextInPool;
+	std::atomic<uintptr_t> __nextInPool;
 
 	// Reference counter for SharedPtr<>
-	AtomicCounter<int> __refCount;
+	std::atomic<int> __refCount;
 };
 
 } // namespace ZeroTier
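The Buf comment retained above explains why the buffer is exactly 16384 bytes: a power of two lets every read and write mask the iterator instead of branching, and overflow is checked once at the end with writeOverflow()/readOverflow(). A tiny standalone sketch of that idea, with assumed names rather than the Buf API itself:

#include <cstdint>

// SIZE is a power of two, so (i & MASK) always lands inside the array; the
// iterator is allowed to run past the end and is range-checked once afterward.
static const unsigned int SIZE = 16384;
static const unsigned int MASK = SIZE - 1;

static uint8_t readByte(const uint8_t *buf, int &ii)
{
	const int s = ii++;
	return buf[(unsigned int)s & MASK]; // branch-free: wraps instead of overrunning
}

static bool overflowed(const int ii) { return ii > (int)SIZE; } // same test as writeOverflow()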

+ 0 - 1
node/CMakeLists.txt

@@ -4,7 +4,6 @@ project(zt_core)
 set(core_headers
 	../include/ZeroTierCore.h
 	Address.hpp
-	AtomicCounter.hpp
 	Buf.hpp
 	C25519.hpp
 	Capability.hpp

+ 38 - 2
node/CertificateOfMembership.cpp

@@ -49,6 +49,42 @@ void CertificateOfMembership::setQualifier(uint64_t id,uint64_t value,uint64_t m
 	}
 }
 
+bool CertificateOfMembership::agreesWith(const CertificateOfMembership &other) const
+{
+	unsigned int myidx = 0;
+	unsigned int otheridx = 0;
+
+	if ((_qualifierCount == 0)||(other._qualifierCount == 0))
+		return false;
+
+	while (myidx < _qualifierCount) {
+		// Fail if we're at the end of other, since this means the field is
+		// missing.
+		if (otheridx >= other._qualifierCount)
+			return false;
+
+		// Seek to corresponding tuple in other, ignoring tuples that
+		// we may not have. If we run off the end of other, the tuple is
+		// missing. This works because tuples are sorted by ID.
+		while (other._qualifiers[otheridx].id != _qualifiers[myidx].id) {
+			++otheridx;
+			if (otheridx >= other._qualifierCount)
+				return false;
+		}
+
+		// Compare to determine if the absolute value of the difference
+		// between these two parameters is within our maxDelta.
+		const uint64_t a = _qualifiers[myidx].value;
+		const uint64_t b = other._qualifiers[myidx].value;
+		if (((a >= b) ? (a - b) : (b - a)) > _qualifiers[myidx].maxDelta)
+			return false;
+
+		++myidx;
+	}
+
+	return true;
+}
+
 bool CertificateOfMembership::sign(const Identity &with)
 {
 	uint64_t buf[ZT_NETWORK_COM_MAX_QUALIFIERS * 3];
@@ -69,7 +105,7 @@ bool CertificateOfMembership::sign(const Identity &with)
 	}
 }
 
-int CertificateOfMembership::marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX]) const
+int CertificateOfMembership::marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX]) const noexcept
 {
 	data[0] = 1;
 	Utils::storeBigEndian<uint16_t>(data + 1,(uint16_t)_qualifierCount);
@@ -90,7 +126,7 @@ int CertificateOfMembership::marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MAR
 	return p;
 }
 
-int CertificateOfMembership::unmarshal(const uint8_t *data,int len)
+int CertificateOfMembership::unmarshal(const uint8_t *data,int len) noexcept
 {
 	if ((len < 3)||(data[0] != 1))
 		return -1;
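The agreesWith() body moved above (out of the header) says, in effect: two certificates agree when every qualifier tuple in this COM has a counterpart in the other with the same ID whose value differs by at most this COM's maxDelta. A tiny worked example with hypothetical numbers, not values from this commit:

#include <cstdint>
#include <cstdio>

int main()
{
	// Hypothetical timestamp qualifier: mine vs. theirs, tolerance maxDelta.
	const uint64_t mine = 1000000, theirs = 1042000, maxDelta = 60000;
	const bool agrees = ((mine >= theirs) ? (mine - theirs) : (theirs - mine)) <= maxDelta;
	std::printf("%s\n", agrees ? "agree (|diff| = 42000 <= 60000)" : "disagree");
	return 0;
}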

+ 14 - 48
node/CertificateOfMembership.hpp

@@ -70,7 +70,7 @@ class CertificateOfMembership : public Credential
 	friend class Credential;
 
 public:
-	static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_COM; }
+	static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COM; }
 
 	/**
 	 * Reserved qualifier IDs
@@ -102,7 +102,7 @@ public:
 	/**
 	 * Create an empty certificate of membership
 	 */
-	ZT_ALWAYS_INLINE CertificateOfMembership() { memoryZero(this); }
+	ZT_ALWAYS_INLINE CertificateOfMembership() noexcept { memoryZero(this); }
 
 	/**
 	 * Create from required fields common to all networks
@@ -117,17 +117,17 @@ public:
 	/**
 	 * @return True if there's something here
 	 */
-	ZT_ALWAYS_INLINE operator bool() const { return (_qualifierCount != 0); }
+	ZT_ALWAYS_INLINE operator bool() const noexcept { return (_qualifierCount != 0); }
 
 	/**
 	 * @return Credential ID, always 0 for COMs
 	 */
-	ZT_ALWAYS_INLINE uint32_t id() const { return 0; }
+	ZT_ALWAYS_INLINE uint32_t id() const noexcept { return 0; }
 
 	/**
 	 * @return Timestamp for this cert and maximum delta for timestamp
 	 */
-	ZT_ALWAYS_INLINE int64_t timestamp() const
+	ZT_ALWAYS_INLINE int64_t timestamp() const noexcept
 	{
 		for(unsigned int i=0;i<_qualifierCount;++i) {
 			if (_qualifiers[i].id == COM_RESERVED_ID_TIMESTAMP)
@@ -139,7 +139,7 @@ public:
 	/**
 	 * @return Address to which this cert was issued
 	 */
-	ZT_ALWAYS_INLINE Address issuedTo() const
+	ZT_ALWAYS_INLINE Address issuedTo() const noexcept
 	{
 		for(unsigned int i=0;i<_qualifierCount;++i) {
 			if (_qualifiers[i].id == COM_RESERVED_ID_ISSUED_TO)
@@ -151,7 +151,7 @@ public:
 	/**
 	 * @return Network ID for which this cert was issued
 	 */
-	ZT_ALWAYS_INLINE uint64_t networkId() const
+	ZT_ALWAYS_INLINE uint64_t networkId() const noexcept
 	{
 		for(unsigned int i=0;i<_qualifierCount;++i) {
 			if (_qualifiers[i].id == COM_RESERVED_ID_NETWORK_ID)
@@ -186,41 +186,7 @@ public:
 	 * @param other Cert to compare with
 	 * @return True if certs agree and 'other' may be communicated with
 	 */
-	ZT_ALWAYS_INLINE bool agreesWith(const CertificateOfMembership &other) const
-	{
-		unsigned int myidx = 0;
-		unsigned int otheridx = 0;
-
-		if ((_qualifierCount == 0)||(other._qualifierCount == 0))
-			return false;
-
-		while (myidx < _qualifierCount) {
-			// Fail if we're at the end of other, since this means the field is
-			// missing.
-			if (otheridx >= other._qualifierCount)
-				return false;
-
-			// Seek to corresponding tuple in other, ignoring tuples that
-			// we may not have. If we run off the end of other, the tuple is
-			// missing. This works because tuples are sorted by ID.
-			while (other._qualifiers[otheridx].id != _qualifiers[myidx].id) {
-				++otheridx;
-				if (otheridx >= other._qualifierCount)
-					return false;
-			}
-
-			// Compare to determine if the absolute value of the difference
-			// between these two parameters is within our maxDelta.
-			const uint64_t a = _qualifiers[myidx].value;
-			const uint64_t b = other._qualifiers[myidx].value;
-			if (((a >= b) ? (a - b) : (b - a)) > _qualifiers[myidx].maxDelta)
-				return false;
-
-			++myidx;
-		}
-
-		return true;
-	}
+	bool agreesWith(const CertificateOfMembership &other) const;
 
 	/**
 	 * Sign this certificate
@@ -241,11 +207,11 @@ public:
 	/**
 	 * @return Address that signed this certificate or null address if none
 	 */
-	ZT_ALWAYS_INLINE const Address &signedBy() const { return _signedBy; }
+	ZT_ALWAYS_INLINE const Address &signedBy() const noexcept { return _signedBy; }
 
-	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX; }
-	int marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX]) const;
-	int unmarshal(const uint8_t *data,int len);
+	static constexpr int marshalSizeMax() noexcept { return ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX; }
+	int marshal(uint8_t data[ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX]) const noexcept;
+	int unmarshal(const uint8_t *data,int len) noexcept;
 
 	bool operator==(const CertificateOfMembership &c) const;
 	ZT_ALWAYS_INLINE bool operator!=(const CertificateOfMembership &c) const { return (!(*this == c)); }
@@ -253,11 +219,11 @@ public:
 private:
 	struct _Qualifier
 	{
-		ZT_ALWAYS_INLINE _Qualifier() : id(0),value(0),maxDelta(0) {}
+		ZT_ALWAYS_INLINE _Qualifier() noexcept : id(0),value(0),maxDelta(0) {}
 		uint64_t id;
 		uint64_t value;
 		uint64_t maxDelta;
-		ZT_ALWAYS_INLINE bool operator<(const _Qualifier &q) const { return (id < q.id); } // sort order
+		ZT_ALWAYS_INLINE bool operator<(const _Qualifier &q) const noexcept { return (id < q.id); } // sort order
 	};
 
 	Address _signedBy;

+ 2 - 2
node/CertificateOfOwnership.cpp

@@ -48,7 +48,7 @@ bool CertificateOfOwnership::sign(const Identity &signer)
 	return false;
 }
 
-int CertificateOfOwnership::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX],bool forSign) const
+int CertificateOfOwnership::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX],bool forSign) const noexcept
 {
 	int p = 0;
 	if (forSign) {
@@ -82,7 +82,7 @@ int CertificateOfOwnership::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSH
 	return p;
 }
 
-int CertificateOfOwnership::unmarshal(const uint8_t *data,int len)
+int CertificateOfOwnership::unmarshal(const uint8_t *data,int len) noexcept
 {
 	if (len < 30)
 		return -1;

+ 22 - 22
node/CertificateOfOwnership.hpp

@@ -47,7 +47,7 @@ class CertificateOfOwnership : public Credential
 	friend class Credential;
 
 public:
-	static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_COO; }
+	static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COO; }
 
 	enum Thing
 	{
@@ -57,9 +57,9 @@ public:
 		THING_IPV6_ADDRESS = 3
 	};
 
-	ZT_ALWAYS_INLINE CertificateOfOwnership() { memoryZero(this); }
+	ZT_ALWAYS_INLINE CertificateOfOwnership() noexcept { memoryZero(this); }
 
-	ZT_ALWAYS_INLINE CertificateOfOwnership(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id)
+	ZT_ALWAYS_INLINE CertificateOfOwnership(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id) noexcept
 	{
 		memset(reinterpret_cast<void *>(this),0,sizeof(CertificateOfOwnership));
 		_networkId = nwid;
@@ -68,19 +68,19 @@ public:
 		_issuedTo = issuedTo;
 	}
 
-	ZT_ALWAYS_INLINE uint64_t networkId() const { return _networkId; }
-	ZT_ALWAYS_INLINE int64_t timestamp() const { return _ts; }
-	ZT_ALWAYS_INLINE uint32_t id() const { return _id; }
-	ZT_ALWAYS_INLINE const Address &issuedTo() const { return _issuedTo; }
-	ZT_ALWAYS_INLINE const Address &signer() const { return _signedBy; }
-	ZT_ALWAYS_INLINE const uint8_t *signature() const { return _signature; }
-	ZT_ALWAYS_INLINE unsigned int signatureLength() const { return _signatureLength; }
+	ZT_ALWAYS_INLINE uint64_t networkId() const noexcept { return _networkId; }
+	ZT_ALWAYS_INLINE int64_t timestamp() const noexcept { return _ts; }
+	ZT_ALWAYS_INLINE uint32_t id() const noexcept { return _id; }
+	ZT_ALWAYS_INLINE const Address &issuedTo() const noexcept { return _issuedTo; }
+	ZT_ALWAYS_INLINE const Address &signer() const noexcept { return _signedBy; }
+	ZT_ALWAYS_INLINE const uint8_t *signature() const noexcept { return _signature; }
+	ZT_ALWAYS_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
 
-	ZT_ALWAYS_INLINE unsigned int thingCount() const { return (unsigned int)_thingCount; }
-	ZT_ALWAYS_INLINE Thing thingType(const unsigned int i) const { return (Thing)_thingTypes[i]; }
-	ZT_ALWAYS_INLINE const uint8_t *thingValue(const unsigned int i) const { return _thingValues[i]; }
+	ZT_ALWAYS_INLINE unsigned int thingCount() const noexcept { return (unsigned int)_thingCount; }
+	ZT_ALWAYS_INLINE Thing thingType(const unsigned int i) const noexcept { return (Thing)_thingTypes[i]; }
+	ZT_ALWAYS_INLINE const uint8_t *thingValue(const unsigned int i) const noexcept { return _thingValues[i]; }
 
-	ZT_ALWAYS_INLINE bool owns(const InetAddress &ip) const
+	ZT_ALWAYS_INLINE bool owns(const InetAddress &ip) const noexcept
 	{
 		if (ip.ss_family == AF_INET)
 			return this->_owns(THING_IPV4_ADDRESS,&(reinterpret_cast<const struct sockaddr_in *>(&ip)->sin_addr.s_addr),4);
@@ -89,7 +89,7 @@ public:
 		return false;
 	}
 
-	ZT_ALWAYS_INLINE bool owns(const MAC &mac) const
+	ZT_ALWAYS_INLINE bool owns(const MAC &mac) const noexcept
 	{
 		uint8_t tmp[6];
 		mac.copyTo(tmp);
@@ -107,18 +107,18 @@ public:
 
 	ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const { return _verify(RR,tPtr,*this); }
 
-	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; }
-	int marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX],bool forSign = false) const;
-	int unmarshal(const uint8_t *data,int len);
+	static constexpr int marshalSizeMax() noexcept { return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; }
+	int marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX],bool forSign = false) const noexcept;
+	int unmarshal(const uint8_t *data,int len) noexcept;
 
 	// Provides natural sort order by ID
-	ZT_ALWAYS_INLINE bool operator<(const CertificateOfOwnership &coo) const { return (_id < coo._id); }
+	ZT_ALWAYS_INLINE bool operator<(const CertificateOfOwnership &coo) const noexcept { return (_id < coo._id); }
 
-	ZT_ALWAYS_INLINE bool operator==(const CertificateOfOwnership &coo) const { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) == 0); }
-	ZT_ALWAYS_INLINE bool operator!=(const CertificateOfOwnership &coo) const { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) != 0); }
+	ZT_ALWAYS_INLINE bool operator==(const CertificateOfOwnership &coo) const noexcept { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) == 0); }
+	ZT_ALWAYS_INLINE bool operator!=(const CertificateOfOwnership &coo) const noexcept { return (memcmp(this,&coo,sizeof(CertificateOfOwnership)) != 0); }
 
 private:
-	ZT_ALWAYS_INLINE bool _owns(const Thing &t,const void *v,unsigned int l) const
+	ZT_ALWAYS_INLINE bool _owns(const Thing &t,const void *v,unsigned int l) const noexcept
 	{
 		for(unsigned int i=0,j=_thingCount;i<j;++i) {
 			if (_thingTypes[i] == (uint8_t)t) {

+ 4 - 4
node/Constants.hpp

@@ -82,14 +82,14 @@
 #define ZT_PEER_SECRET_KEY_LENGTH 32
 
 /**
- * Minimum delay between timer task checks to prevent thrashing
+ * Maximum delay between timer task checks
 */
-#define ZT_MIN_TIMER_TASK_INTERVAL 500
+#define ZT_MAX_TIMER_TASK_INTERVAL 1000
 
 /**
- * Maximum delay between timer task checks (should be a fraction of smallest housekeeping interval)
+ * Interval between steps or stages in NAT-t attempts
 */
-#define ZT_MAX_TIMER_TASK_INTERVAL 5000
+#define ZT_NAT_TRAVERSAL_INTERVAL 200
 
 /**
 * How often most internal cleanup and housekeeping tasks are performed

+ 3 - 3
node/Credential.cpp

@@ -52,7 +52,7 @@ static ZT_ALWAYS_INLINE Credential::VerifyResult _credVerify(const RuntimeEnviro
 	if ((!signedBy)||(signedBy != Network::controllerFor(networkId)))
 		return Credential::VERIFY_BAD_SIGNATURE;
 
-	const SharedPtr<Peer> peer(RR->topology->get(tPtr,signedBy));
+	const SharedPtr<Peer> peer(RR->topology->peer(tPtr,signedBy));
 	if (!peer) {
 		RR->sw->requestWhois(tPtr,RR->node->now(),signedBy);
 		return Credential::VERIFY_NEED_IDENTITY;
@@ -77,7 +77,7 @@ Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *const RR,
 	if ((!credential._signedBy)||(credential._signedBy != Network::controllerFor(credential.networkId()))||(credential._qualifierCount > ZT_NETWORK_COM_MAX_QUALIFIERS))
 		return Credential::VERIFY_BAD_SIGNATURE;
 
-	const SharedPtr<Peer> peer(RR->topology->get(tPtr,credential._signedBy));
+	const SharedPtr<Peer> peer(RR->topology->peer(tPtr,credential._signedBy));
 	if (!peer) {
 		RR->sw->requestWhois(tPtr,RR->node->now(),credential._signedBy);
 		return Credential::VERIFY_NEED_IDENTITY;
@@ -118,7 +118,7 @@ Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *RR,void *
 					return Credential::VERIFY_BAD_SIGNATURE; // otherwise if we have another entry it must be from the previous holder in the chain
 			}
 
-			const SharedPtr<Peer> peer(RR->topology->get(tPtr,credential._custody[c].from));
+			const SharedPtr<Peer> peer(RR->topology->peer(tPtr,credential._custody[c].from));
 			if (peer) {
 				if (!peer->identity().verify(tmp,(unsigned int)l,credential._custody[c].signature,credential._custody[c].signatureLength))
 					return Credential::VERIFY_BAD_SIGNATURE;

+ 19 - 35
node/Defragmenter.hpp

@@ -16,7 +16,6 @@
 
 
 #include "Constants.hpp"
 #include "Constants.hpp"
 #include "Buf.hpp"
 #include "Buf.hpp"
-#include "AtomicCounter.hpp"
 #include "SharedPtr.hpp"
 #include "SharedPtr.hpp"
 #include "Hashtable.hpp"
 #include "Hashtable.hpp"
 #include "Mutex.hpp"
 #include "Mutex.hpp"
@@ -38,7 +37,7 @@ namespace ZeroTier {
  * hairiness makes it very desirable to be able to test and fuzz this code
  * hairiness makes it very desirable to be able to test and fuzz this code
  * independently.
  * independently.
  *
  *
- * Here be dragons!
+ * This class is thread-safe and handles locking internally.
  *
  *
  * @tparam MF Maximum number of fragments that each message can possess
  * @tparam MF Maximum number of fragments that each message can possess
  * @tparam GCS Garbage collection target size for the incoming message queue
  * @tparam GCS Garbage collection target size for the incoming message queue
@@ -148,10 +147,10 @@ public:
 		if ((fragmentNo >= totalFragmentsExpected)||(totalFragmentsExpected > MF)||(totalFragmentsExpected == 0))
 		if ((fragmentNo >= totalFragmentsExpected)||(totalFragmentsExpected > MF)||(totalFragmentsExpected == 0))
 			return ERR_INVALID_FRAGMENT;
 			return ERR_INVALID_FRAGMENT;
 
 
-		// Lock messages for read and look up current entry. Also check the
-		// GC trigger and if we've exceeded that threshold then older message
-		// entries are garbage collected.
-		_messages_l.rlock();
+		// We hold the read lock on _messages unless we need to add a new entry or do GC.
+		RWMutex::RMaybeWLock ml(_messages_l);
+
+		// Check message hash table size and perform GC if necessary.
 		if (_messages.size() >= GCT) {
 		if (_messages.size() >= GCT) {
 			try {
 			try {
 				// Scan messages with read lock still locked first and make a sorted list of
 				// Scan messages with read lock still locked first and make a sorted list of
@@ -160,7 +159,7 @@ public:
 				// under the target size. This tries to minimize the amount of time the write
 				// under the target size. This tries to minimize the amount of time the write
 				// lock is held since many threads can hold the read lock but all threads must
 				// lock is held since many threads can hold the read lock but all threads must
 				// wait if someone holds the write lock.
 				// wait if someone holds the write lock.
-				std::vector< std::pair<int64_t,uint64_t> > messagesByLastUsedTime;
+				std::vector<std::pair<int64_t,uint64_t> > messagesByLastUsedTime;
 				messagesByLastUsedTime.reserve(_messages.size());
 				messagesByLastUsedTime.reserve(_messages.size());
 
 
 				typename Hashtable<uint64_t,_E>::Iterator i(_messages);
 				typename Hashtable<uint64_t,_E>::Iterator i(_messages);
@@ -171,47 +170,37 @@ public:
 
 
 				std::sort(messagesByLastUsedTime.begin(),messagesByLastUsedTime.end());
 				std::sort(messagesByLastUsedTime.begin(),messagesByLastUsedTime.end());
 
 
-				_messages_l.runlock(); _messages_l.lock();
+				ml.writing(); // acquire write lock on _messages
 				for (unsigned long x = 0,y = (messagesByLastUsedTime.size() - GCS); x <= y; ++x)
 				for (unsigned long x = 0,y = (messagesByLastUsedTime.size() - GCS); x <= y; ++x)
 					_messages.erase(messagesByLastUsedTime[x].second);
 					_messages.erase(messagesByLastUsedTime[x].second);
-				_messages_l.unlock(); _messages_l.rlock();
 			} catch (...) {
 			} catch (...) {
-				// The only way something in that code can throw is if a bad_alloc occurs when
-				// reserve() is called in the vector. In this case we flush the entire queue
-				// and error out. This is very rare and on some platforms impossible.
-				_messages_l.runlock();
-				_messages_l.lock();
-				_messages.clear();
-				_messages_l.unlock();
 				return ERR_OUT_OF_MEMORY;
 				return ERR_OUT_OF_MEMORY;
 			}
 			}
 		}
 		}
-		_E *e = _messages.get(messageId);
-		_messages_l.runlock();
 
 
-		// If no entry exists we must briefly lock messages for write and create a new one.
+		// Get or create message fragment.
+		_E *e = _messages.get(messageId);
 		if (!e) {
 		if (!e) {
+			ml.writing(); // acquire write lock on _messages if not already
 			try {
 			try {
-				RWMutex::Lock ml(_messages_l);
 				e = &(_messages[messageId]);
 				e = &(_messages[messageId]);
-			} catch ( ... ) {
+			} catch (...) {
 				return ERR_OUT_OF_MEMORY;
 				return ERR_OUT_OF_MEMORY;
 			}
 			}
 			e->id = messageId;
 			e->id = messageId;
 		}
 		}
 
 
-		// Now handle this fragment within this individual message entry.
-		Mutex::Lock el(e->lock);
+		// Switch back to holding only the read lock on _messages if we have locked for write
+		ml.reading();
 
 
-		// Note: it's important that _messages_l is not locked while the entry
-		// is locked or a deadlock could occur due to GC or clear() being called
-		// in another thread.
+		// Acquire lock on entry itself
+		Mutex::Lock el(e->lock);
 
 
 		// This magic value means this message has already been assembled and is done.
 		// This magic value means this message has already been assembled and is done.
 		if (e->lastUsed < 0)
 		if (e->lastUsed < 0)
 			return ERR_DUPLICATE_FRAGMENT;
 			return ERR_DUPLICATE_FRAGMENT;
 
 
-		// Update last-activity timestamp for this entry.
+		// Update last-activity timestamp for this entry, delaying GC.
 		e->lastUsed = now;
 		e->lastUsed = now;
 
 
 		// Learn total fragments expected if a value is given. Otherwise the cached
 		// Learn total fragments expected if a value is given. Otherwise the cached
@@ -294,14 +283,9 @@ private:
 		ZT_ALWAYS_INLINE _E() : id(0),lastUsed(0),totalFragmentsExpected(0),via(),message(),lock() {}
 		ZT_ALWAYS_INLINE _E() : id(0),lastUsed(0),totalFragmentsExpected(0),via(),message(),lock() {}
 		ZT_ALWAYS_INLINE ~_E()
 		ZT_ALWAYS_INLINE ~_E()
 		{
 		{
-			// Ensure that this entry is not in use while it is being deleted!
-			lock.lock();
-			if (via) {
-				via->_inboundFragmentedMessages_l.lock();
-				via->_inboundFragmentedMessages.erase(id);
-				via->_inboundFragmentedMessages_l.unlock();
-			}
-			lock.unlock();
+			via->_inboundFragmentedMessages_l.lock();
+			via->_inboundFragmentedMessages.erase(id);
+			via->_inboundFragmentedMessages_l.unlock();
 		}
 		}
 		uint64_t id;
 		uint64_t id;
 		volatile int64_t lastUsed;
 		volatile int64_t lastUsed;
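
The comments in the hunk above describe the garbage-collection strategy: scan the message table under the shared read lock, sort entries by last-use time, and hold the write lock only for the brief pass that erases the oldest entries until the table is back at the target size. A minimal standalone sketch of that pattern, using the RWMutex::RMaybeWLock added in node/Mutex.hpp below; "Entry", "table" and "gcTargetSize" are illustrative stand-ins, not actual Defragmenter members:

#include <algorithm>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>
#include "Mutex.hpp"

struct Entry { int64_t lastUsed; };

static void gcOldest(ZeroTier::RWMutex &tableLock,std::map<uint64_t,Entry> &table,const unsigned long gcTargetSize)
{
	ZeroTier::RWMutex::RMaybeWLock ml(tableLock);            // start with the shared read lock
	if (table.size() <= gcTargetSize)
		return;

	std::vector< std::pair<int64_t,uint64_t> > byLastUsed;   // (lastUsed,messageId), built under the read lock
	byLastUsed.reserve(table.size());
	for(std::map<uint64_t,Entry>::const_iterator i(table.begin());i!=table.end();++i)
		byLastUsed.push_back(std::pair<int64_t,uint64_t>(i->second.lastUsed,i->first));
	std::sort(byLastUsed.begin(),byLastUsed.end());          // oldest entries first

	ml.writing();                                            // upgrade: write lock held only for the erase pass
	for(unsigned long x=0,y=(unsigned long)(byLastUsed.size() - gcTargetSize);x<=y;++x)
		table.erase(byLastUsed[x].second);
	ml.reading();                                            // drop back to the shared read lock
}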

+ 2 - 2
node/Dictionary.hpp

@@ -138,12 +138,12 @@ public:
 	/**
 	/**
 	 * @return Number of entries
 	 * @return Number of entries
 	 */
 	 */
-	ZT_ALWAYS_INLINE unsigned int size() const { return _t.size(); }
+	ZT_ALWAYS_INLINE unsigned int size() const noexcept { return _t.size(); }
 
 
 	/**
 	/**
 	 * @return True if dictionary is not empty
 	 * @return True if dictionary is not empty
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool empty() const { return _t.empty(); }
+	ZT_ALWAYS_INLINE bool empty() const noexcept { return _t.empty(); }
 
 
 	/**
 	/**
 	 * Encode to a string in the supplied vector
 	 * Encode to a string in the supplied vector

+ 2 - 2
node/Endpoint.cpp

@@ -53,7 +53,7 @@ bool Endpoint::operator<(const Endpoint &ep) const
 	return false;
 	return false;
 }
 }
 
 
-int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const
+int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept
 {
 {
 	int p;
 	int p;
 	data[0] = (uint8_t)_t;
 	data[0] = (uint8_t)_t;
@@ -108,7 +108,7 @@ int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const
 	}
 	}
 }
 }
 
 
-int Endpoint::unmarshal(const uint8_t *restrict data,const int len)
+int Endpoint::unmarshal(const uint8_t *restrict data,const int len) noexcept
 {
 {
 	if (len < 7)
 	if (len < 7)
 		return -1;
 		return -1;

+ 13 - 13
node/Endpoint.hpp

@@ -51,7 +51,7 @@ public:
 		UNRECOGNIZED = 255  // Unrecognized endpoint type encountered in stream
 		UNRECOGNIZED = 255  // Unrecognized endpoint type encountered in stream
 	};
 	};
 
 
-	ZT_ALWAYS_INLINE Endpoint() { memoryZero(this); }
+	ZT_ALWAYS_INLINE Endpoint() noexcept { memoryZero(this); }
 
 
 	explicit ZT_ALWAYS_INLINE Endpoint(const InetAddress &sa) { *this = sa; }
 	explicit ZT_ALWAYS_INLINE Endpoint(const InetAddress &sa) { *this = sa; }
 
 
@@ -93,44 +93,44 @@ public:
 	/**
 	/**
 	 * @return InetAddress or NIL if not of this type
 	 * @return InetAddress or NIL if not of this type
 	 */
 	 */
-	ZT_ALWAYS_INLINE const InetAddress &inetAddr() const { return ((_t == INETADDR_V4)||(_t == INETADDR_V6)) ? *reinterpret_cast<const InetAddress *>(&_v.sa) : InetAddress::NIL; }
+	ZT_ALWAYS_INLINE const InetAddress &inetAddr() const noexcept { return ((_t == INETADDR_V4)||(_t == INETADDR_V6)) ? *reinterpret_cast<const InetAddress *>(&_v.sa) : InetAddress::NIL; }
 
 
 	/**
 	/**
 	 * @return DNS name or empty string if not of this type
 	 * @return DNS name or empty string if not of this type
 	 */
 	 */
-	ZT_ALWAYS_INLINE const char *dnsName() const { return (_t == DNSNAME) ? _v.dns.name : ""; }
+	ZT_ALWAYS_INLINE const char *dnsName() const noexcept { return (_t == DNSNAME) ? _v.dns.name : ""; }
 
 
 	/**
 	/**
 	 * @return Port associated with DNS name or -1 if not of this type
 	 * @return Port associated with DNS name or -1 if not of this type
 	 */
 	 */
-	ZT_ALWAYS_INLINE int dnsPort() const { return (_t == DNSNAME) ? _v.dns.port : -1; }
+	ZT_ALWAYS_INLINE int dnsPort() const noexcept { return (_t == DNSNAME) ? _v.dns.port : -1; }
 
 
 	/**
 	/**
 	 * @return ZeroTier address or NIL if not of this type
 	 * @return ZeroTier address or NIL if not of this type
 	 */
 	 */
-	ZT_ALWAYS_INLINE Address ztAddress() const { return Address((_t == ZEROTIER) ? _v.zt.a : (uint64_t)0); }
+	ZT_ALWAYS_INLINE Address ztAddress() const noexcept { return Address((_t == ZEROTIER) ? _v.zt.a : (uint64_t)0); }
 
 
 	/**
 	/**
 	 * @return 384-bit hash of identity keys or NULL if not of this type
 	 * @return 384-bit hash of identity keys or NULL if not of this type
 	 */
 	 */
-	ZT_ALWAYS_INLINE const uint8_t *ztIdentityHash() const { return (_t == ZEROTIER) ? _v.zt.idh : nullptr; }
+	ZT_ALWAYS_INLINE const uint8_t *ztIdentityHash() const noexcept { return (_t == ZEROTIER) ? _v.zt.idh : nullptr; }
 
 
 	/**
 	/**
 	 * @return URL or empty string if not of this type
 	 * @return URL or empty string if not of this type
 	 */
 	 */
-	ZT_ALWAYS_INLINE const char *url() const { return (_t == URL) ? _v.url : ""; }
+	ZT_ALWAYS_INLINE const char *url() const noexcept { return (_t == URL) ? _v.url : ""; }
 
 
 	/**
 	/**
 	 * @return Ethernet address or NIL if not of this type
 	 * @return Ethernet address or NIL if not of this type
 	 */
 	 */
-	ZT_ALWAYS_INLINE MAC ethernet() const { return (_t == ETHERNET) ? MAC(_v.eth) : MAC(); }
+	ZT_ALWAYS_INLINE MAC ethernet() const noexcept { return (_t == ETHERNET) ? MAC(_v.eth) : MAC(); }
 
 
 	/**
 	/**
 	 * @return Endpoint type or NIL if unset/empty
 	 * @return Endpoint type or NIL if unset/empty
 	 */
 	 */
-	ZT_ALWAYS_INLINE Type type() const { return _t; }
+	ZT_ALWAYS_INLINE Type type() const noexcept { return _t; }
 
 
-	explicit ZT_ALWAYS_INLINE operator bool() const { return _t != NIL; }
+	explicit ZT_ALWAYS_INLINE operator bool() const noexcept { return _t != NIL; }
 
 
 	bool operator==(const Endpoint &ep) const;
 	bool operator==(const Endpoint &ep) const;
 	ZT_ALWAYS_INLINE bool operator!=(const Endpoint &ep) const { return (!(*this == ep)); }
 	ZT_ALWAYS_INLINE bool operator!=(const Endpoint &ep) const { return (!(*this == ep)); }
@@ -139,9 +139,9 @@ public:
 	ZT_ALWAYS_INLINE bool operator<=(const Endpoint &ep) const { return !(ep < *this); }
 	ZT_ALWAYS_INLINE bool operator<=(const Endpoint &ep) const { return !(ep < *this); }
 	ZT_ALWAYS_INLINE bool operator>=(const Endpoint &ep) const { return !(*this < ep); }
 	ZT_ALWAYS_INLINE bool operator>=(const Endpoint &ep) const { return !(*this < ep); }
 
 
-	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_ENDPOINT_MARSHAL_SIZE_MAX; }
-	int marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const;
-	int unmarshal(const uint8_t *restrict data,int len);
+	static constexpr int marshalSizeMax() noexcept { return ZT_ENDPOINT_MARSHAL_SIZE_MAX; }
+	int marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept;
+	int unmarshal(const uint8_t *restrict data,int len) noexcept;
 
 
 private:
 private:
 	Type _t;
 	Type _t;
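
Each accessor above returns a type-appropriate empty value when the endpoint holds a different variant, and marshal()/unmarshal() (see the Endpoint.cpp hunk above) round-trip the wire encoding. A hypothetical round-trip sketch; "addr" is an assumed, already-populated InetAddress rather than anything defined in this commit:

#include <cstdint>
#include "Endpoint.hpp"
#include "InetAddress.hpp"

static bool endpointRoundTrip(const ZeroTier::InetAddress &addr)
{
	ZeroTier::Endpoint a(addr);                       // INETADDR_V4 or INETADDR_V6
	uint8_t tmp[ZT_ENDPOINT_MARSHAL_SIZE_MAX];
	const int len = a.marshal(tmp);                   // bytes written, or negative on error

	ZeroTier::Endpoint b;
	if ((len <= 0)||(b.unmarshal(tmp,len) <= 0))
		return false;

	// Accessors for other variants return their empty values here, e.g. b.dnsName() == ""
	// and b.ztAddress() is zero, while b.inetAddr() equals addr.
	return (b == a);
}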

+ 14 - 14
node/FCV.hpp

@@ -43,7 +43,7 @@ public:
 	typedef T * iterator;
 	typedef T * iterator;
 	typedef const T * const_iterator;
 	typedef const T * const_iterator;
 
 
-	ZT_ALWAYS_INLINE FCV() : _s(0) {}
+	ZT_ALWAYS_INLINE FCV() noexcept : _s(0) {}
 
 
 	template<unsigned int C2>
 	template<unsigned int C2>
 	ZT_ALWAYS_INLINE FCV(const FCV<T,C2> &v) : _s(0) { *this = v; }
 	ZT_ALWAYS_INLINE FCV(const FCV<T,C2> &v) : _s(0) { *this = v; }
@@ -77,7 +77,7 @@ public:
 	/**
 	/**
 	 * Clear without calling destructors (same as unsafeResize(0))
 	 * Clear without calling destructors (same as unsafeResize(0))
 	 */
 	 */
-	ZT_ALWAYS_INLINE void unsafeClear() { _s = 0; }
+	ZT_ALWAYS_INLINE void unsafeClear() noexcept { _s = 0; }
 
 
 	/**
 	/**
 	 * This does a straight copy of one vector's data to another
 	 * This does a straight copy of one vector's data to another
@@ -91,7 +91,7 @@ public:
 	 * @param v Other vector to copy to this one
 	 * @param v Other vector to copy to this one
 	 */
 	 */
 	template<unsigned int C2>
 	template<unsigned int C2>
-	ZT_ALWAYS_INLINE void unsafeAssign(const FCV<T,C2> &v)
+	ZT_ALWAYS_INLINE void unsafeAssign(const FCV<T,C2> &v) noexcept
 	{
 	{
 		_s = ((C2 > C)&&(v._s > C)) ? C : v._s;
 		_s = ((C2 > C)&&(v._s > C)) ? C : v._s;
 		memcpy(_m,v._m,_s * sizeof(T));
 		memcpy(_m,v._m,_s * sizeof(T));
@@ -105,23 +105,23 @@ public:
 	 *
 	 *
 	 * @param v Target vector
 	 * @param v Target vector
 	 */
 	 */
-	ZT_ALWAYS_INLINE void unsafeMoveTo(FCV &v)
+	ZT_ALWAYS_INLINE void unsafeMoveTo(FCV &v) noexcept
 	{
 	{
 		memcpy(v._m,_m,(v._s = _s) * sizeof(T));
 		memcpy(v._m,_m,(v._s = _s) * sizeof(T));
 		_s = 0;
 		_s = 0;
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE iterator begin() { return reinterpret_cast<T *>(_m); }
-	ZT_ALWAYS_INLINE const_iterator begin() const { return reinterpret_cast<const T *>(_m); }
-	ZT_ALWAYS_INLINE iterator end() { return reinterpret_cast<T *>(_m) + _s; }
-	ZT_ALWAYS_INLINE const_iterator end() const { return reinterpret_cast<const T *>(_m) + _s; }
+	ZT_ALWAYS_INLINE iterator begin() noexcept { return reinterpret_cast<T *>(_m); }
+	ZT_ALWAYS_INLINE const_iterator begin() const noexcept { return reinterpret_cast<const T *>(_m); }
+	ZT_ALWAYS_INLINE iterator end() noexcept { return reinterpret_cast<T *>(_m) + _s; }
+	ZT_ALWAYS_INLINE const_iterator end() const noexcept { return reinterpret_cast<const T *>(_m) + _s; }
 
 
-	ZT_ALWAYS_INLINE T &operator[](const unsigned int i) { return reinterpret_cast<T *>(_m)[i]; }
-	ZT_ALWAYS_INLINE const T &operator[](const unsigned int i) const { return reinterpret_cast<T *>(_m)[i]; }
+	ZT_ALWAYS_INLINE T &operator[](const unsigned int i) noexcept { return reinterpret_cast<T *>(_m)[i]; }
+	ZT_ALWAYS_INLINE const T &operator[](const unsigned int i) const noexcept { return reinterpret_cast<T *>(_m)[i]; }
 
 
-	ZT_ALWAYS_INLINE unsigned int size() const { return _s; }
-	ZT_ALWAYS_INLINE bool empty() const { return (_s == 0); }
-	static constexpr unsigned int capacity() { return C; }
+	ZT_ALWAYS_INLINE unsigned int size() const noexcept { return _s; }
+	ZT_ALWAYS_INLINE bool empty() const noexcept { return (_s == 0); }
+	static constexpr unsigned int capacity() noexcept { return C; }
 
 
 	/**
 	/**
 	 * Push a value onto the back of this vector
 	 * Push a value onto the back of this vector
@@ -200,7 +200,7 @@ public:
 	 *
 	 *
 	 * @param ns New size (clipped to C if larger than capacity)
 	 * @param ns New size (clipped to C if larger than capacity)
 	 */
 	 */
-	ZT_ALWAYS_INLINE void unsafeResize(const unsigned int ns) { _s = (ns > C) ? C : ns; }
+	ZT_ALWAYS_INLINE void unsafeResize(const unsigned int ns) noexcept { _s = (ns > C) ? C : ns; }
 
 
 	/**
 	/**
 	 * This is a bounds checked auto-resizing variant of the [] operator
 	 * This is a bounds checked auto-resizing variant of the [] operator
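
The unsafe*() methods above are raw memcpy-style operations that never run constructors or destructors, which is why they are fast and why they are only appropriate for trivially copyable element types. A small usage sketch of the clipping and move semantics described in the comments:

#include "FCV.hpp"

static void fcvSketch()
{
	ZeroTier::FCV<int,8> big;
	big.unsafeResize(6);                    // size() == 6, elements uninitialized
	for(unsigned int i=0;i<big.size();++i)
		big[i] = (int)i;

	ZeroTier::FCV<int,4> small;
	small.unsafeAssign(big);                // raw copy clipped to capacity: small.size() == 4

	ZeroTier::FCV<int,8> other;
	big.unsafeMoveTo(other);                // bulk copy: other.size() == 6, big.size() == 0
}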

+ 13 - 13
node/Hashtable.hpp

@@ -57,7 +57,7 @@ public:
 		/**
 		/**
 		 * @param ht Hash table to iterate over
 		 * @param ht Hash table to iterate over
 		 */
 		 */
-		explicit ZT_ALWAYS_INLINE Iterator(Hashtable &ht) :
+		explicit ZT_ALWAYS_INLINE Iterator(Hashtable &ht) noexcept :
 			_idx(0),
 			_idx(0),
 			_ht(&ht),
 			_ht(&ht),
 			_b(ht._t[0])
 			_b(ht._t[0])
@@ -344,26 +344,26 @@ public:
 	/**
 	/**
 	 * @return Number of entries
 	 * @return Number of entries
 	 */
 	 */
-	ZT_ALWAYS_INLINE unsigned long size() const { return _s; }
+	ZT_ALWAYS_INLINE unsigned long size() const noexcept { return _s; }
 
 
 	/**
 	/**
 	 * @return True if table is empty
 	 * @return True if table is empty
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool empty() const { return (_s == 0); }
+	ZT_ALWAYS_INLINE bool empty() const noexcept { return (_s == 0); }
 
 
 private:
 private:
 	template<typename O>
 	template<typename O>
 	static ZT_ALWAYS_INLINE unsigned long _hc(const O &obj) { return (unsigned long)obj.hashCode(); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(const O &obj) { return (unsigned long)obj.hashCode(); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const uint64_t i) { return (unsigned long)(i ^ (i >> 32U)); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const uint32_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const uint16_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const uint8_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const int64_t i) { return (unsigned long)((unsigned long long)i ^ ((unsigned long long)i >> 32U)); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const int32_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const int16_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const int8_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(void *p) { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const void *p) { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const uint64_t i) noexcept { return (unsigned long)(i ^ (i >> 32U)); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const uint32_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const uint16_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const uint8_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const int64_t i) noexcept { return (unsigned long)((unsigned long long)i ^ ((unsigned long long)i >> 32U)); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const int32_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const int16_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const int8_t i) noexcept { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(void *p) noexcept { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const void *p) noexcept { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
 
 
 	ZT_ALWAYS_INLINE void _grow()
 	ZT_ALWAYS_INLINE void _grow()
 	{
 	{

+ 2 - 2
node/Identity.cpp

@@ -379,7 +379,7 @@ bool Identity::fromString(const char *str)
 	return true;
 	return true;
 }
 }
 
 
-int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],const bool includePrivate) const
+int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],const bool includePrivate) const noexcept
 {
 {
 	_address.copyTo(data);
 	_address.copyTo(data);
 	switch(_type) {
 	switch(_type) {
@@ -412,7 +412,7 @@ int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],const bool incl
 	return -1;
 	return -1;
 }
 }
 
 
-int Identity::unmarshal(const uint8_t *data,const int len)
+int Identity::unmarshal(const uint8_t *data,const int len) noexcept
 {
 {
 	if (len < (ZT_ADDRESS_LENGTH + 1))
 	if (len < (ZT_ADDRESS_LENGTH + 1))
 		return -1;
 		return -1;

+ 16 - 16
node/Identity.hpp

@@ -61,7 +61,7 @@ public:
 	 */
 	 */
 	static const Identity NIL;
 	static const Identity NIL;
 
 
-	ZT_ALWAYS_INLINE Identity() { memoryZero(this); }
+	ZT_ALWAYS_INLINE Identity() noexcept { memoryZero(this); }
 	ZT_ALWAYS_INLINE ~Identity() { Utils::burn(reinterpret_cast<void *>(&this->_priv),sizeof(this->_priv)); }
 	ZT_ALWAYS_INLINE ~Identity() { Utils::burn(reinterpret_cast<void *>(&this->_priv),sizeof(this->_priv)); }
 
 
 	/**
 	/**
@@ -77,12 +77,12 @@ public:
 	/**
 	/**
 	 * Set identity to NIL value (all zero)
 	 * Set identity to NIL value (all zero)
 	 */
 	 */
-	ZT_ALWAYS_INLINE void zero() { memoryZero(this); }
+	ZT_ALWAYS_INLINE void zero() noexcept { memoryZero(this); }
 
 
 	/**
 	/**
 	 * @return Identity type (undefined if identity is null or invalid)
 	 * @return Identity type (undefined if identity is null or invalid)
 	 */
 	 */
-	ZT_ALWAYS_INLINE Type type() const { return _type; }
+	ZT_ALWAYS_INLINE Type type() const noexcept { return _type; }
 
 
 	/**
 	/**
 	 * Generate a new identity (address, key pair)
 	 * Generate a new identity (address, key pair)
@@ -103,7 +103,7 @@ public:
 	/**
 	/**
 	 * @return True if this identity contains a private key
 	 * @return True if this identity contains a private key
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool hasPrivate() const { return _hasPrivate; }
+	ZT_ALWAYS_INLINE bool hasPrivate() const noexcept { return _hasPrivate; }
 
 
 	/**
 	/**
 	 * @return 384-bit/48-byte hash of this identity's public key(s)
 	 * @return 384-bit/48-byte hash of this identity's public key(s)
@@ -161,7 +161,7 @@ public:
 	/**
 	/**
 	 * @return This identity's address
 	 * @return This identity's address
 	 */
 	 */
-	ZT_ALWAYS_INLINE const Address &address() const { return _address; }
+	ZT_ALWAYS_INLINE const Address &address() const noexcept { return _address; }
 
 
 	/**
 	/**
 	 * Serialize to a more human-friendly string
 	 * Serialize to a more human-friendly string
@@ -186,9 +186,9 @@ public:
 	/**
 	/**
 	 * @return True if this identity contains something
 	 * @return True if this identity contains something
 	 */
 	 */
-	explicit ZT_ALWAYS_INLINE operator bool() const { return (_address); }
+	explicit ZT_ALWAYS_INLINE operator bool() const noexcept { return (_address); }
 
 
-	ZT_ALWAYS_INLINE bool operator==(const Identity &id) const
+	ZT_ALWAYS_INLINE bool operator==(const Identity &id) const noexcept
 	{
 	{
 		if ((_address == id._address)&&(_type == id._type)) {
 		if ((_address == id._address)&&(_type == id._type)) {
 			switch(_type) {
 			switch(_type) {
@@ -199,7 +199,7 @@ public:
 		}
 		}
 		return false;
 		return false;
 	}
 	}
-	ZT_ALWAYS_INLINE bool operator<(const Identity &id) const
+	ZT_ALWAYS_INLINE bool operator<(const Identity &id) const noexcept
 	{
 	{
 		if (_address < id._address)
 		if (_address < id._address)
 			return true;
 			return true;
@@ -216,16 +216,16 @@ public:
 		}
 		}
 		return false;
 		return false;
 	}
 	}
-	ZT_ALWAYS_INLINE bool operator!=(const Identity &id) const { return !(*this == id); }
-	ZT_ALWAYS_INLINE bool operator>(const Identity &id) const { return (id < *this); }
-	ZT_ALWAYS_INLINE bool operator<=(const Identity &id) const { return !(id < *this); }
-	ZT_ALWAYS_INLINE bool operator>=(const Identity &id) const { return !(*this < id); }
+	ZT_ALWAYS_INLINE bool operator!=(const Identity &id) const noexcept { return !(*this == id); }
+	ZT_ALWAYS_INLINE bool operator>(const Identity &id) const noexcept { return (id < *this); }
+	ZT_ALWAYS_INLINE bool operator<=(const Identity &id) const noexcept { return !(id < *this); }
+	ZT_ALWAYS_INLINE bool operator>=(const Identity &id) const noexcept { return !(*this < id); }
 
 
-	ZT_ALWAYS_INLINE unsigned long hashCode() const { return ((unsigned long)_address.toInt() + (unsigned long)_pub.c25519[0] + (unsigned long)_pub.c25519[1] + (unsigned long)_pub.c25519[2]); }
+	ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return ((unsigned long)_address.toInt() + (unsigned long)_pub.c25519[0] + (unsigned long)_pub.c25519[1] + (unsigned long)_pub.c25519[2]); }
 
 
-	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_IDENTITY_MARSHAL_SIZE_MAX; }
-	int marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],bool includePrivate = false) const;
-	int unmarshal(const uint8_t *data,int len);
+	static constexpr int marshalSizeMax() noexcept { return ZT_IDENTITY_MARSHAL_SIZE_MAX; }
+	int marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],bool includePrivate = false) const noexcept;
+	int unmarshal(const uint8_t *data,int len) noexcept;
 
 
 private:
 private:
 	void _computeHash(); // recompute _hash
 	void _computeHash(); // recompute _hash

+ 2 - 2
node/InetAddress.cpp

@@ -110,7 +110,7 @@ void InetAddress::set(const void *ipBytes,unsigned int ipLen,unsigned int port)
 	}
 	}
 }
 }
 
 
-char *InetAddress::toString(char buf[64]) const
+char *InetAddress::toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const
 {
 {
 	char *p = toIpString(buf);
 	char *p = toIpString(buf);
 	if (*p) {
 	if (*p) {
@@ -121,7 +121,7 @@ char *InetAddress::toString(char buf[64]) const
 	return buf;
 	return buf;
 }
 }
 
 
-char *InetAddress::toIpString(char buf[64]) const
+char *InetAddress::toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const
 {
 {
 	buf[0] = (char)0;
 	buf[0] = (char)0;
 	switch(ss_family) {
 	switch(ss_family) {

+ 3 - 2
node/InetAddress.hpp

@@ -26,6 +26,7 @@
 namespace ZeroTier {
 namespace ZeroTier {
 
 
 #define ZT_INETADDRESS_MARSHAL_SIZE_MAX 19
 #define ZT_INETADDRESS_MARSHAL_SIZE_MAX 19
+#define ZT_INETADDRESS_STRING_SIZE_MAX 64
 
 
 /**
 /**
  * Extends sockaddr_storage with friendly C++ methods
  * Extends sockaddr_storage with friendly C++ methods
@@ -213,12 +214,12 @@ public:
 	/**
 	/**
 	 * @return ASCII IP/port format representation
 	 * @return ASCII IP/port format representation
 	 */
 	 */
-	char *toString(char buf[64]) const;
+	char *toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const;
 
 
 	/**
 	/**
 	 * @return IP portion only, in ASCII string format
 	 * @return IP portion only, in ASCII string format
 	 */
 	 */
-	char *toIpString(char buf[64]) const;
+	char *toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const;
 
 
 	/**
 	/**
 	 * @param ipSlashPort IP/port (port is optional, will be 0 if not included)
 	 * @param ipSlashPort IP/port (port is optional, will be 0 if not included)

+ 26 - 27
node/MAC.hpp

@@ -31,33 +31,32 @@ namespace ZeroTier {
 class MAC : public TriviallyCopyable
 class MAC : public TriviallyCopyable
 {
 {
 public:
 public:
-	ZT_ALWAYS_INLINE MAC() : _m(0ULL) {}
-	ZT_ALWAYS_INLINE MAC(const uint8_t a,const uint8_t b,const uint8_t c,const uint8_t d,const uint8_t e,const uint8_t f) :
-		_m( (((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U) | (((uint64_t)e) << 8U) | ((uint64_t)f) ) {}
-	explicit ZT_ALWAYS_INLINE MAC(const uint64_t m) : _m(m & 0xffffffffffffULL) {}
-	explicit ZT_ALWAYS_INLINE MAC(const uint8_t b[6]) { setTo(b); }
-	ZT_ALWAYS_INLINE MAC(const Address &ztaddr,uint64_t nwid) { fromAddress(ztaddr,nwid); }
+	ZT_ALWAYS_INLINE MAC() noexcept : _m(0ULL) {}
+	ZT_ALWAYS_INLINE MAC(const uint8_t a,const uint8_t b,const uint8_t c,const uint8_t d,const uint8_t e,const uint8_t f) noexcept : _m( (((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U) | (((uint64_t)e) << 8U) | ((uint64_t)f) ) {}
+	explicit ZT_ALWAYS_INLINE MAC(const uint64_t m) noexcept : _m(m & 0xffffffffffffULL) {}
+	explicit ZT_ALWAYS_INLINE MAC(const uint8_t b[6]) noexcept { setTo(b); }
+	ZT_ALWAYS_INLINE MAC(const Address &ztaddr,uint64_t nwid) noexcept { fromAddress(ztaddr,nwid); }
 
 
 	/**
 	/**
 	 * @return MAC in 64-bit integer
 	 * @return MAC in 64-bit integer
 	 */
 	 */
-	ZT_ALWAYS_INLINE uint64_t toInt() const { return _m; }
+	ZT_ALWAYS_INLINE uint64_t toInt() const noexcept { return _m; }
 
 
 	/**
 	/**
 	 * Set MAC to zero
 	 * Set MAC to zero
 	 */
 	 */
-	ZT_ALWAYS_INLINE void zero() { _m = 0ULL; }
+	ZT_ALWAYS_INLINE void zero() noexcept { _m = 0ULL; }
 
 
 	/**
 	/**
 	 * @return True if MAC is non-zero
 	 * @return True if MAC is non-zero
 	 */
 	 */
-	ZT_ALWAYS_INLINE operator bool() const { return (_m != 0ULL); }
+	ZT_ALWAYS_INLINE operator bool() const noexcept { return (_m != 0ULL); }
 
 
 	/**
 	/**
 	 * @param bits Raw MAC in big-endian byte order
 	 * @param bits Raw MAC in big-endian byte order
 	 * @param len Length, must be >= 6 or result is zero
 	 * @param len Length, must be >= 6 or result is zero
 	 */
 	 */
-	ZT_ALWAYS_INLINE void setTo(const uint8_t b[6])
+	ZT_ALWAYS_INLINE void setTo(const uint8_t b[6]) noexcept
 	{
 	{
 		_m = ((uint64_t)b[0] << 40U) | ((uint64_t)b[1] << 32U) | ((uint64_t)b[2] << 24U) | ((uint64_t)b[3] << 16U) | ((uint64_t)b[4] << 8U) | (uint64_t)b[5];
 		_m = ((uint64_t)b[0] << 40U) | ((uint64_t)b[1] << 32U) | ((uint64_t)b[2] << 24U) | ((uint64_t)b[3] << 16U) | ((uint64_t)b[4] << 8U) | (uint64_t)b[5];
 	}
 	}
@@ -66,7 +65,7 @@ public:
 	 * @param buf Destination buffer for MAC in big-endian byte order
 	 * @param buf Destination buffer for MAC in big-endian byte order
 	 * @param len Length of buffer, must be >= 6 or nothing is copied
 	 * @param len Length of buffer, must be >= 6 or nothing is copied
 	 */
 	 */
-	ZT_ALWAYS_INLINE void copyTo(uint8_t b[6]) const
+	ZT_ALWAYS_INLINE void copyTo(uint8_t b[6]) const noexcept
 	{
 	{
 		b[0] = (uint8_t)(_m >> 40U);
 		b[0] = (uint8_t)(_m >> 40U);
 		b[1] = (uint8_t)(_m >> 32U);
 		b[1] = (uint8_t)(_m >> 32U);
@@ -79,12 +78,12 @@ public:
 	/**
 	/**
 	 * @return True if this is broadcast (all 0xff)
 	 * @return True if this is broadcast (all 0xff)
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool isBroadcast() const { return (_m == 0xffffffffffffULL); }
+	ZT_ALWAYS_INLINE bool isBroadcast() const noexcept { return (_m == 0xffffffffffffULL); }
 
 
 	/**
 	/**
 	 * @return True if this is a multicast MAC
 	 * @return True if this is a multicast MAC
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool isMulticast() const { return ((_m & 0x010000000000ULL) != 0ULL); }
+	ZT_ALWAYS_INLINE bool isMulticast() const noexcept { return ((_m & 0x010000000000ULL) != 0ULL); }
 
 
 	/**
 	/**
 	 * Set this MAC to a MAC derived from an address and a network ID
 	 * Set this MAC to a MAC derived from an address and a network ID
@@ -92,7 +91,7 @@ public:
 	 * @param ztaddr ZeroTier address
 	 * @param ztaddr ZeroTier address
 	 * @param nwid 64-bit network ID
 	 * @param nwid 64-bit network ID
 	 */
 	 */
-	ZT_ALWAYS_INLINE void fromAddress(const Address &ztaddr,uint64_t nwid)
+	ZT_ALWAYS_INLINE void fromAddress(const Address &ztaddr,uint64_t nwid) noexcept
 	{
 	{
 		uint64_t m = ((uint64_t)firstOctetForNetwork(nwid)) << 40U;
 		uint64_t m = ((uint64_t)firstOctetForNetwork(nwid)) << 40U;
 		m |= ztaddr.toInt(); // a is 40 bits
 		m |= ztaddr.toInt(); // a is 40 bits
@@ -111,7 +110,7 @@ public:
 	 *
 	 *
 	 * @param nwid Network ID
 	 * @param nwid Network ID
 	 */
 	 */
-	ZT_ALWAYS_INLINE Address toAddress(uint64_t nwid) const
+	ZT_ALWAYS_INLINE Address toAddress(uint64_t nwid) const noexcept
 	{
 	{
 		uint64_t a = _m & 0xffffffffffULL; // least significant 40 bits of MAC are formed from address
 		uint64_t a = _m & 0xffffffffffULL; // least significant 40 bits of MAC are formed from address
 		a ^= ((nwid >> 8U) & 0xffU) << 32U; // ... XORed with bits 8-48 of the nwid in little-endian byte order, so unmask it
 		a ^= ((nwid >> 8U) & 0xffU) << 32U; // ... XORed with bits 8-48 of the nwid in little-endian byte order, so unmask it
@@ -126,7 +125,7 @@ public:
 	 * @param nwid Network ID
 	 * @param nwid Network ID
 	 * @return First octet of MAC for this network
 	 * @return First octet of MAC for this network
 	 */
 	 */
-	static ZT_ALWAYS_INLINE unsigned char firstOctetForNetwork(uint64_t nwid)
+	static ZT_ALWAYS_INLINE unsigned char firstOctetForNetwork(uint64_t nwid) noexcept
 	{
 	{
 		const uint8_t a = ((uint8_t)(nwid & 0xfeU) | 0x02U); // locally administered, not multicast, from LSB of network ID
 		const uint8_t a = ((uint8_t)(nwid & 0xfeU) | 0x02U); // locally administered, not multicast, from LSB of network ID
 		return ((a == 0x52) ? 0x32 : a); // blacklist 0x52 since it's used by KVM, libvirt, and other popular virtualization engines... seems de-facto standard on Linux
 		return ((a == 0x52) ? 0x32 : a); // blacklist 0x52 since it's used by KVM, libvirt, and other popular virtualization engines... seems de-facto standard on Linux
@@ -136,16 +135,16 @@ public:
 	 * @param i Value from 0 to 5 (inclusive)
 	 * @param i Value from 0 to 5 (inclusive)
 	 * @return Byte at said position (address interpreted in big-endian order)
 	 * @return Byte at said position (address interpreted in big-endian order)
 	 */
 	 */
-	ZT_ALWAYS_INLINE uint8_t operator[](unsigned int i) const { return (uint8_t)(_m >> (40 - (i * 8))); }
+	ZT_ALWAYS_INLINE uint8_t operator[](unsigned int i) const noexcept { return (uint8_t)(_m >> (40 - (i * 8))); }
 
 
 	/**
 	/**
 	 * @return 6, which is the number of bytes in a MAC, for container compliance
 	 * @return 6, which is the number of bytes in a MAC, for container compliance
 	 */
 	 */
-	ZT_ALWAYS_INLINE unsigned int size() const { return 6; }
+	ZT_ALWAYS_INLINE unsigned int size() const noexcept { return 6; }
 
 
-	ZT_ALWAYS_INLINE unsigned long hashCode() const { return (unsigned long)_m; }
+	ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return (unsigned long)_m; }
 
 
-	ZT_ALWAYS_INLINE char *toString(char buf[18]) const
+	ZT_ALWAYS_INLINE char *toString(char buf[18]) const noexcept
 	{
 	{
 		buf[0] = Utils::HEXCHARS[(_m >> 44U) & 0xfU];
 		buf[0] = Utils::HEXCHARS[(_m >> 44U) & 0xfU];
 		buf[1] = Utils::HEXCHARS[(_m >> 40U) & 0xfU];
 		buf[1] = Utils::HEXCHARS[(_m >> 40U) & 0xfU];
@@ -168,18 +167,18 @@ public:
 		return buf;
 		return buf;
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE MAC &operator=(const uint64_t m)
+	ZT_ALWAYS_INLINE MAC &operator=(const uint64_t m) noexcept
 	{
 	{
 		_m = m & 0xffffffffffffULL;
 		_m = m & 0xffffffffffffULL;
 		return *this;
 		return *this;
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE bool operator==(const MAC &m) const { return (_m == m._m); }
-	ZT_ALWAYS_INLINE bool operator!=(const MAC &m) const { return (_m != m._m); }
-	ZT_ALWAYS_INLINE bool operator<(const MAC &m) const { return (_m < m._m); }
-	ZT_ALWAYS_INLINE bool operator<=(const MAC &m) const { return (_m <= m._m); }
-	ZT_ALWAYS_INLINE bool operator>(const MAC &m) const { return (_m > m._m); }
-	ZT_ALWAYS_INLINE bool operator>=(const MAC &m) const { return (_m >= m._m); }
+	ZT_ALWAYS_INLINE bool operator==(const MAC &m) const noexcept { return (_m == m._m); }
+	ZT_ALWAYS_INLINE bool operator!=(const MAC &m) const noexcept { return (_m != m._m); }
+	ZT_ALWAYS_INLINE bool operator<(const MAC &m) const noexcept { return (_m < m._m); }
+	ZT_ALWAYS_INLINE bool operator<=(const MAC &m) const noexcept { return (_m <= m._m); }
+	ZT_ALWAYS_INLINE bool operator>(const MAC &m) const noexcept { return (_m > m._m); }
+	ZT_ALWAYS_INLINE bool operator>=(const MAC &m) const noexcept { return (_m >= m._m); }
 
 
 private:
 private:
 	uint64_t _m;
 	uint64_t _m;
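
fromAddress() and toAddress() above are inverses for a given network ID: the first octet comes from the low byte of the network ID (locally administered bit set, with 0x52 remapped to avoid KVM/libvirt), and the low 40 bits carry the ZeroTier address XORed with bits of the network ID, so the address can be recovered from a MAC seen on the same network. A small sketch with arbitrary example values; it assumes Address can be constructed from a 64-bit integer, as elsewhere in this codebase:

#include <cstdint>
#include "Address.hpp"
#include "MAC.hpp"

static bool macRoundTrip()
{
	const uint64_t nwid = 0x1122334455000abcULL;     // example network ID
	const ZeroTier::Address addr(0x89e92ceee5ULL);   // example 40-bit ZeroTier address

	const ZeroTier::MAC m(addr,nwid);                // equivalent to m.fromAddress(addr,nwid)
	// m[0] is MAC::firstOctetForNetwork(nwid); the remaining 40 bits encode addr.
	return (m.toAddress(nwid) == addr);              // true: the mapping is reversible
}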

+ 1 - 1
node/Membership.hpp

@@ -126,7 +126,7 @@ public:
 	/**
 	/**
 	 * Generates a key for internal use in indexing credentials by type and credential ID
 	 * Generates a key for internal use in indexing credentials by type and credential ID
 	 */
 	 */
-	static ZT_ALWAYS_INLINE uint64_t credentialKey(const ZT_CredentialType &t,const uint32_t i) { return (((uint64_t)t << 32U) | (uint64_t)i); }
+	static ZT_ALWAYS_INLINE uint64_t credentialKey(const ZT_CredentialType &t,const uint32_t i) noexcept { return (((uint64_t)t << 32U) | (uint64_t)i); }
 
 
 	AddCredentialResult addCredential(const RuntimeEnvironment *RR,void *tPtr,const Identity &sourcePeerIdentity,const NetworkConfig &nconf,const CertificateOfMembership &com);
 	AddCredentialResult addCredential(const RuntimeEnvironment *RR,void *tPtr,const Identity &sourcePeerIdentity,const NetworkConfig &nconf,const CertificateOfMembership &com);
 	AddCredentialResult addCredential(const RuntimeEnvironment *RR,void *tPtr,const Identity &sourcePeerIdentity,const NetworkConfig &nconf,const Tag &tag);
 	AddCredentialResult addCredential(const RuntimeEnvironment *RR,void *tPtr,const Identity &sourcePeerIdentity,const NetworkConfig &nconf,const Tag &tag);

+ 4 - 5
node/Meter.hpp

@@ -16,7 +16,6 @@
 
 
 #include "Constants.hpp"
 #include "Constants.hpp"
 #include "Mutex.hpp"
 #include "Mutex.hpp"
-#include "AtomicCounter.hpp"
 
 
 #define ZT_METER_HISTORY_LENGTH 4
 #define ZT_METER_HISTORY_LENGTH 4
 #define ZT_METER_HISTORY_TICK_DURATION 1000
 #define ZT_METER_HISTORY_TICK_DURATION 1000
@@ -29,7 +28,7 @@ namespace ZeroTier {
 class Meter
 class Meter
 {
 {
 public:
 public:
-	ZT_ALWAYS_INLINE Meter()
+	ZT_ALWAYS_INLINE Meter() noexcept
 	{
 	{
 		for(int i=0;i<ZT_METER_HISTORY_LENGTH;++i)
 		for(int i=0;i<ZT_METER_HISTORY_LENGTH;++i)
 			_history[i] = 0.0;
 			_history[i] = 0.0;
@@ -38,7 +37,7 @@ public:
 	}
 	}
 
 
 	template<typename I>
 	template<typename I>
-	ZT_ALWAYS_INLINE void log(const int64_t now,I count)
+	ZT_ALWAYS_INLINE void log(const int64_t now,I count) noexcept
 	{
 	{
 		const int64_t since = now - _ts;
 		const int64_t since = now - _ts;
 		if (since >= ZT_METER_HISTORY_TICK_DURATION) {
 		if (since >= ZT_METER_HISTORY_TICK_DURATION) {
@@ -50,7 +49,7 @@ public:
 		}
 		}
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE double perSecond(const int64_t now) const
+	ZT_ALWAYS_INLINE double perSecond(const int64_t now) const noexcept
 	{
 	{
 		double r = 0.0,n = 0.0;
 		double r = 0.0,n = 0.0;
 		const int64_t since = (now - _ts);
 		const int64_t since = (now - _ts);
@@ -69,7 +68,7 @@ private:
 	volatile double _history[ZT_METER_HISTORY_LENGTH];
 	volatile double _history[ZT_METER_HISTORY_LENGTH];
 	volatile int64_t _ts;
 	volatile int64_t _ts;
 	volatile uint64_t _count;
 	volatile uint64_t _count;
-	AtomicCounter<unsigned int> _hptr;
+	std::atomic<unsigned int> _hptr;
 };
 };
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier
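
Meter keeps a short ring of per-tick totals (ZT_METER_HISTORY_LENGTH buckets of ZT_METER_HISTORY_TICK_DURATION milliseconds each): log() adds an amount to the current bucket and rotates when a tick has elapsed, and perSecond() averages the recent buckets into a rate. A hypothetical usage sketch; in the real code "now" would come from the node's clock:

#include <cstdint>
#include "Meter.hpp"

static double meterSketch(const int64_t now)
{
	ZeroTier::Meter rx;
	rx.log(now,1400);                  // e.g. a 1400-byte packet was received
	rx.log(now + 10,64);               // a 64-byte packet 10ms later (same bucket)
	return rx.perSecond(now + 1000);   // smoothed bytes-per-second over recent ticks
}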

+ 11 - 11
node/MulticastGroup.hpp

@@ -42,8 +42,8 @@ namespace ZeroTier {
 class MulticastGroup : public TriviallyCopyable
 class MulticastGroup : public TriviallyCopyable
 {
 {
 public:
 public:
-	ZT_ALWAYS_INLINE MulticastGroup() : _mac(),_adi(0) {}
-	ZT_ALWAYS_INLINE MulticastGroup(const MAC &m,uint32_t a) : _mac(m),_adi(a) {}
+	ZT_ALWAYS_INLINE MulticastGroup() noexcept : _mac(),_adi(0) {}
+	ZT_ALWAYS_INLINE MulticastGroup(const MAC &m,uint32_t a) noexcept : _mac(m),_adi(a) {}
 
 
 	/**
 	/**
 	 * Derive the multicast group used for address resolution (ARP/NDP) for an IP
 	 * Derive the multicast group used for address resolution (ARP/NDP) for an IP
@@ -51,7 +51,7 @@ public:
 	 * @param ip IP address (port field is ignored)
 	 * @param ip IP address (port field is ignored)
 	 * @return Multicast group for ARP/NDP
 	 * @return Multicast group for ARP/NDP
 	 */
 	 */
-	static ZT_ALWAYS_INLINE MulticastGroup deriveMulticastGroupForAddressResolution(const InetAddress &ip)
+	static ZT_ALWAYS_INLINE MulticastGroup deriveMulticastGroupForAddressResolution(const InetAddress &ip) noexcept
 	{
 	{
 		if (ip.isV4()) {
 		if (ip.isV4()) {
 			// IPv4 wants broadcast MACs, so we shove the V4 address itself into
 			// IPv4 wants broadcast MACs, so we shove the V4 address itself into
@@ -73,16 +73,16 @@ public:
 	/**
 	/**
 	 * @return Ethernet MAC portion of multicast group
 	 * @return Ethernet MAC portion of multicast group
 	 */
 	 */
-	ZT_ALWAYS_INLINE const MAC &mac() const { return _mac; }
+	ZT_ALWAYS_INLINE const MAC &mac() const noexcept { return _mac; }
 
 
 	/**
 	/**
 	 * @return Additional distinguishing information, which is normally zero except for IPv4 ARP where it's the IPv4 address
 	 * @return Additional distinguishing information, which is normally zero except for IPv4 ARP where it's the IPv4 address
 	 */
 	 */
 	ZT_ALWAYS_INLINE uint32_t adi() const { return _adi; }
 	ZT_ALWAYS_INLINE uint32_t adi() const { return _adi; }
 
 
-	ZT_ALWAYS_INLINE bool operator==(const MulticastGroup &g) const { return ((_mac == g._mac)&&(_adi == g._adi)); }
-	ZT_ALWAYS_INLINE bool operator!=(const MulticastGroup &g) const { return ((_mac != g._mac)||(_adi != g._adi)); }
-	ZT_ALWAYS_INLINE bool operator<(const MulticastGroup &g) const
+	ZT_ALWAYS_INLINE bool operator==(const MulticastGroup &g) const noexcept { return ((_mac == g._mac)&&(_adi == g._adi)); }
+	ZT_ALWAYS_INLINE bool operator!=(const MulticastGroup &g) const noexcept { return ((_mac != g._mac)||(_adi != g._adi)); }
+	ZT_ALWAYS_INLINE bool operator<(const MulticastGroup &g) const noexcept
 	{
 	{
 		if (_mac < g._mac)
 		if (_mac < g._mac)
 			return true;
 			return true;
@@ -90,11 +90,11 @@ public:
 			return (_adi < g._adi);
 			return (_adi < g._adi);
 		return false;
 		return false;
 	}
 	}
-	ZT_ALWAYS_INLINE bool operator>(const MulticastGroup &g) const { return (g < *this); }
-	ZT_ALWAYS_INLINE bool operator<=(const MulticastGroup &g) const { return !(g < *this); }
-	ZT_ALWAYS_INLINE bool operator>=(const MulticastGroup &g) const { return !(*this < g); }
+	ZT_ALWAYS_INLINE bool operator>(const MulticastGroup &g) const noexcept { return (g < *this); }
+	ZT_ALWAYS_INLINE bool operator<=(const MulticastGroup &g) const noexcept { return !(g < *this); }
+	ZT_ALWAYS_INLINE bool operator>=(const MulticastGroup &g) const noexcept { return !(*this < g); }
 
 
-	ZT_ALWAYS_INLINE unsigned long hashCode() const { return (_mac.hashCode() ^ (unsigned long)_adi); }
+	ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return (_mac.hashCode() ^ (unsigned long)_adi); }
 
 
 private:
 private:
 	MAC _mac;
 	MAC _mac;
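
The adi() comment above spells out the convention used by deriveMulticastGroupForAddressResolution(): the ADI is normally zero, but for IPv4 ARP the group uses the broadcast MAC and carries the IPv4 address itself as the ADI, which is what keeps per-IP resolution groups distinct. A sketch using the set() overload visible in the InetAddress.cpp hunk above; the 10.1.2.3 address is an arbitrary example:

#include <cstdint>
#include "InetAddress.hpp"
#include "MulticastGroup.hpp"

static void arpGroupSketch()
{
	const uint8_t ip4[4] = { 10,1,2,3 };
	ZeroTier::InetAddress ip;
	ip.set(ip4,4,0);                             // IPv4 address; port is irrelevant here

	const ZeroTier::MulticastGroup mg(ZeroTier::MulticastGroup::deriveMulticastGroupForAddressResolution(ip));
	const bool bcast = mg.mac().isBroadcast();   // true for the IPv4 ARP case
	const uint32_t adi = mg.adi();               // the IPv4 address itself; would be zero for IPv6 NDP
	(void)bcast; (void)adi;
}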

+ 49 - 28
node/Mutex.hpp

@@ -19,34 +19,34 @@
 #include <cstdint>
 #include <cstdint>
 #include <cstdlib>
 #include <cstdlib>
 
 
-#ifdef __UNIX_LIKE__
-
+#ifndef __WINDOWS__
 #include <pthread.h>
 #include <pthread.h>
+#endif
 
 
 namespace ZeroTier {
 namespace ZeroTier {
 
 
 class Mutex
 class Mutex
 {
 {
 public:
 public:
-	ZT_ALWAYS_INLINE Mutex() { pthread_mutex_init(&_mh,0); }
-	ZT_ALWAYS_INLINE ~Mutex() { pthread_mutex_destroy(&_mh); }
+	ZT_ALWAYS_INLINE Mutex() noexcept { pthread_mutex_init(&_mh,0); }
+	ZT_ALWAYS_INLINE ~Mutex() noexcept { pthread_mutex_destroy(&_mh); }
 
 
-	ZT_ALWAYS_INLINE void lock() const { pthread_mutex_lock(&((const_cast <Mutex *> (this))->_mh)); }
-	ZT_ALWAYS_INLINE void unlock() const { pthread_mutex_unlock(&((const_cast <Mutex *> (this))->_mh)); }
+	ZT_ALWAYS_INLINE void lock() const noexcept { pthread_mutex_lock(&((const_cast <Mutex *> (this))->_mh)); }
+	ZT_ALWAYS_INLINE void unlock() const noexcept { pthread_mutex_unlock(&((const_cast <Mutex *> (this))->_mh)); }
 
 
 	class Lock
 	class Lock
 	{
 	{
 	public:
 	public:
-		ZT_ALWAYS_INLINE Lock(Mutex &m) : _m(&m) { m.lock(); }
-		ZT_ALWAYS_INLINE Lock(const Mutex &m) : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
+		ZT_ALWAYS_INLINE Lock(Mutex &m) noexcept : _m(&m) { m.lock(); }
+		ZT_ALWAYS_INLINE Lock(const Mutex &m) noexcept : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
 		ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
 		ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
 	private:
 	private:
 		Mutex *const _m;
 		Mutex *const _m;
 	};
 	};
 
 
 private:
 private:
-	ZT_ALWAYS_INLINE Mutex(const Mutex &) {}
-	ZT_ALWAYS_INLINE const Mutex &operator=(const Mutex &) { return *this; }
+	ZT_ALWAYS_INLINE Mutex(const Mutex &) noexcept {}
+	ZT_ALWAYS_INLINE const Mutex &operator=(const Mutex &) noexcept { return *this; }
 
 
 	pthread_mutex_t _mh;
 	pthread_mutex_t _mh;
 };
 };
@@ -54,47 +54,69 @@ private:
 class RWMutex
 class RWMutex
 {
 {
 public:
 public:
-	ZT_ALWAYS_INLINE RWMutex() { pthread_rwlock_init(&_mh,0); }
-	ZT_ALWAYS_INLINE ~RWMutex() { pthread_rwlock_destroy(&_mh); }
+	ZT_ALWAYS_INLINE RWMutex() noexcept { pthread_rwlock_init(&_mh,0); }
+	ZT_ALWAYS_INLINE ~RWMutex() noexcept { pthread_rwlock_destroy(&_mh); }
 
 
-	ZT_ALWAYS_INLINE void lock() const { pthread_rwlock_wrlock(&((const_cast <RWMutex *> (this))->_mh)); }
-	ZT_ALWAYS_INLINE void rlock() const { pthread_rwlock_rdlock(&((const_cast <RWMutex *> (this))->_mh)); }
-	ZT_ALWAYS_INLINE void unlock() const { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
-	ZT_ALWAYS_INLINE void runlock() const { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
+	ZT_ALWAYS_INLINE void lock() const noexcept { pthread_rwlock_wrlock(&((const_cast <RWMutex *> (this))->_mh)); }
+	ZT_ALWAYS_INLINE void rlock() const noexcept { pthread_rwlock_rdlock(&((const_cast <RWMutex *> (this))->_mh)); }
+	ZT_ALWAYS_INLINE void unlock() const noexcept { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
+	ZT_ALWAYS_INLINE void runlock() const noexcept { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
 
 
+	/**
+	 * RAII locker that acquires only the read lock (shared read)
+	 */
 	class RLock
 	class RLock
 	{
 	{
 	public:
 	public:
-		ZT_ALWAYS_INLINE RLock(RWMutex &m) : _m(&m) { m.rlock(); }
-		ZT_ALWAYS_INLINE RLock(const RWMutex &m) : _m(const_cast<RWMutex *>(&m)) { _m->rlock(); }
+		ZT_ALWAYS_INLINE RLock(RWMutex &m) noexcept : _m(&m) { m.rlock(); }
+		ZT_ALWAYS_INLINE RLock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)) { _m->rlock(); }
 		ZT_ALWAYS_INLINE ~RLock() { _m->runlock(); }
 		ZT_ALWAYS_INLINE ~RLock() { _m->runlock(); }
 	private:
 	private:
 		RWMutex *const _m;
 		RWMutex *const _m;
 	};
 	};
 
 
+	/**
+	 * RAII locker that acquires the write lock (exclusive write, no readers)
+	 */
 	class Lock
 	class Lock
 	{
 	{
 	public:
 	public:
-		ZT_ALWAYS_INLINE Lock(RWMutex &m) : _m(&m) { m.lock(); }
-		ZT_ALWAYS_INLINE Lock(const RWMutex &m) : _m(const_cast<RWMutex *>(&m)) { _m->lock(); }
+		ZT_ALWAYS_INLINE Lock(RWMutex &m) noexcept : _m(&m) { m.lock(); }
+		ZT_ALWAYS_INLINE Lock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)) { _m->lock(); }
 		ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
 		ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
 	private:
 	private:
 		RWMutex *const _m;
 		RWMutex *const _m;
 	};
 	};
 
 
+	/**
+	 * RAII locker that acquires the read lock first and can switch modes
+	 *
+	 * Use writing() to acquire the write lock if not already acquired. Use reading() to
+	 * let go of the write lock and go back to only holding the read lock.
+	 */
+	class RMaybeWLock
+	{
+	public:
+		ZT_ALWAYS_INLINE RMaybeWLock(RWMutex &m) noexcept : _m(&m),_w(false) { m.rlock(); }
+		ZT_ALWAYS_INLINE RMaybeWLock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)),_w(false) { _m->rlock(); }
+		ZT_ALWAYS_INLINE void writing() noexcept { if (!_w) { _w = true; _m->runlock(); _m->lock(); } }
+		ZT_ALWAYS_INLINE void reading() noexcept { if (_w) { _w = false; _m->unlock(); _m->rlock(); } }
+		ZT_ALWAYS_INLINE ~RMaybeWLock() { if (_w) _m->unlock(); else _m->runlock(); }
+	private:
+		RWMutex *const _m;
+		bool _w;
+	};
+
 private:
 private:
-	ZT_ALWAYS_INLINE RWMutex(const RWMutex &) {}
-	ZT_ALWAYS_INLINE const RWMutex &operator=(const RWMutex &) { return *this; }
+	ZT_ALWAYS_INLINE RWMutex(const RWMutex &) noexcept {}
+	ZT_ALWAYS_INLINE const RWMutex &operator=(const RWMutex &) noexcept { return *this; }
 
 
 	pthread_rwlock_t _mh;
 	pthread_rwlock_t _mh;
 };
 };
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier
 
 
-#endif
-
-#ifdef __WINDOWS__
-
+#if 0
 #include <Windows.h>
 #include <Windows.h>
 
 
 namespace ZeroTier {
 namespace ZeroTier {
@@ -127,7 +149,6 @@ private:
 };
 };
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier
-
-#endif // _WIN32
+#endif
 
 
 #endif
 #endif
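
RMaybeWLock is the optimistic read-mostly pattern now used in the Defragmenter hunk above: take the read lock for the common lookup path and upgrade to the write lock only when a mutation is actually needed. Note that writing() releases the read lock before taking the write lock, so anything observed under the read lock must be re-checked after the upgrade. A hypothetical get-or-insert sketch; "table" and "Entry" are illustrative stand-ins:

#include <cstdint>
#include <map>
#include <utility>
#include "Mutex.hpp"

struct Entry { int64_t lastUsed; };

static bool insertIfMissing(ZeroTier::RWMutex &l,std::map<uint64_t,Entry> &table,const uint64_t id,const int64_t now)
{
	ZeroTier::RWMutex::RMaybeWLock ml(l);    // starts out holding the shared read lock
	if (table.count(id) > 0)
		return false;                        // common path: read lock only

	ml.writing();                            // upgrade; the lock is briefly released,
	if (table.count(id) > 0)                 // so re-check before inserting
		return false;
	Entry e; e.lastUsed = now;
	table.insert(std::pair<uint64_t,Entry>(id,e));
	return true;                             // ~RMaybeWLock releases whichever lock is held
}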

+ 7 - 7
node/Network.cpp

@@ -34,7 +34,7 @@ namespace ZeroTier {
 namespace {
 namespace {
 
 
 // Returns true if packet appears valid; pos and proto will be set
 // Returns true if packet appears valid; pos and proto will be set
-bool _ipv6GetPayload(const uint8_t *frameData,unsigned int frameLen,unsigned int &pos,unsigned int &proto)
+bool _ipv6GetPayload(const uint8_t *frameData,unsigned int frameLen,unsigned int &pos,unsigned int &proto) noexcept
 {
 {
 	if (frameLen < 40)
 	if (frameLen < 40)
 		return false;
 		return false;
@@ -75,10 +75,10 @@ _doZtFilterResult _doZtFilter(
 	const RuntimeEnvironment *RR,
 	const RuntimeEnvironment *RR,
 	Trace::RuleResultLog &rrl,
 	Trace::RuleResultLog &rrl,
 	const NetworkConfig &nconf,
 	const NetworkConfig &nconf,
-	const Membership *membership, // can be NULL
+	const Membership *membership,       // can be NULL
 	const bool inbound,
 	const bool inbound,
 	const Address &ztSource,
 	const Address &ztSource,
-	Address &ztDest, // MUTABLE -- is changed on REDIRECT actions
+	Address &ztDest,                    // MUTABLE -- is changed on REDIRECT actions
 	const MAC &macSource,
 	const MAC &macSource,
 	const MAC &macDest,
 	const MAC &macDest,
 	const uint8_t *const frameData,
 	const uint8_t *const frameData,
@@ -87,10 +87,10 @@ _doZtFilterResult _doZtFilter(
 	const unsigned int vlanId,
 	const unsigned int vlanId,
 	const ZT_VirtualNetworkRule *rules, // cannot be NULL
 	const ZT_VirtualNetworkRule *rules, // cannot be NULL
 	const unsigned int ruleCount,
 	const unsigned int ruleCount,
-	Address &cc, // MUTABLE -- set to TEE destination if TEE action is taken or left alone otherwise
-	unsigned int &ccLength, // MUTABLE -- set to length of packet payload to TEE
-	bool &ccWatch, // MUTABLE -- set to true for WATCH target as opposed to normal TEE
-	uint8_t &qosBucket) // MUTABLE -- set to the value of the argument provided to PRIORITY
+	Address &cc,                        // MUTABLE -- set to TEE destination if TEE action is taken or left alone otherwise
+	unsigned int &ccLength,             // MUTABLE -- set to length of packet payload to TEE
+	bool &ccWatch,                      // MUTABLE -- set to true for WATCH target as opposed to normal TEE
+	uint8_t &qosBucket) noexcept        // MUTABLE -- set to the value of the argument provided to PRIORITY
 {
 {
 	// Set to true if we are a TEE/REDIRECT/WATCH target
 	// Set to true if we are a TEE/REDIRECT/WATCH target
 	bool superAccept = false;
 	bool superAccept = false;

+ 13 - 14
node/Network.hpp

@@ -26,7 +26,6 @@
 #include "Address.hpp"
 #include "Address.hpp"
 #include "Mutex.hpp"
 #include "Mutex.hpp"
 #include "SharedPtr.hpp"
 #include "SharedPtr.hpp"
-#include "AtomicCounter.hpp"
 #include "MulticastGroup.hpp"
 #include "MulticastGroup.hpp"
 #include "MAC.hpp"
 #include "MAC.hpp"
 #include "Buf.hpp"
 #include "Buf.hpp"
@@ -58,7 +57,7 @@ public:
 	/**
 	/**
 	 * Compute primary controller device ID from network ID
 	 * Compute primary controller device ID from network ID
 	 */
 	 */
-	static ZT_ALWAYS_INLINE Address controllerFor(uint64_t nwid) { return Address(nwid >> 24U); }
+	static ZT_ALWAYS_INLINE Address controllerFor(uint64_t nwid) noexcept { return Address(nwid >> 24U); }
 
 
 	/**
 	/**
 	 * Construct a new network
 	 * Construct a new network
@@ -76,14 +75,14 @@ public:
 
 
 	~Network();
 	~Network();
 
 
-	ZT_ALWAYS_INLINE uint64_t id() const { return _id; }
-	ZT_ALWAYS_INLINE Address controller() const { return Address(_id >> 24U); }
-	ZT_ALWAYS_INLINE bool multicastEnabled() const { return (_config.multicastLimit > 0); }
-	ZT_ALWAYS_INLINE bool hasConfig() const { return (_config); }
-	ZT_ALWAYS_INLINE uint64_t lastConfigUpdate() const { return _lastConfigUpdate; }
-	ZT_ALWAYS_INLINE ZT_VirtualNetworkStatus status() const { return _status(); }
-	ZT_ALWAYS_INLINE const NetworkConfig &config() const { return _config; }
-	ZT_ALWAYS_INLINE const MAC &mac() const { return _mac; }
+	ZT_ALWAYS_INLINE uint64_t id() const noexcept { return _id; }
+	ZT_ALWAYS_INLINE Address controller() const noexcept { return Address(_id >> 24U); }
+	ZT_ALWAYS_INLINE bool multicastEnabled() const noexcept { return (_config.multicastLimit > 0); }
+	ZT_ALWAYS_INLINE bool hasConfig() const noexcept { return (_config); }
+	ZT_ALWAYS_INLINE uint64_t lastConfigUpdate() const noexcept { return _lastConfigUpdate; }
+	ZT_ALWAYS_INLINE ZT_VirtualNetworkStatus status() const noexcept { return _status(); }
+	ZT_ALWAYS_INLINE const NetworkConfig &config() const noexcept { return _config; }
+	ZT_ALWAYS_INLINE const MAC &mac() const noexcept { return _mac; }
 
 
 	/**
 	/**
 	 * Apply filters to an outgoing packet
 	 * Apply filters to an outgoing packet
@@ -215,12 +214,12 @@ public:
 	/**
 	/**
 	 * Set netconf failure to 'access denied' -- called in IncomingPacket when controller reports this
 	 * Set netconf failure to 'access denied' -- called in IncomingPacket when controller reports this
 	 */
 	 */
-	ZT_ALWAYS_INLINE void setAccessDenied() { _netconfFailure = NETCONF_FAILURE_ACCESS_DENIED; }
+	ZT_ALWAYS_INLINE void setAccessDenied() noexcept { _netconfFailure = NETCONF_FAILURE_ACCESS_DENIED; }
 
 
 	/**
 	/**
 	 * Set netconf failure to 'not found' -- called by IncomingPacket when controller reports this
 	 * Set netconf failure to 'not found' -- called by IncomingPacket when controller reports this
 	 */
 	 */
-	ZT_ALWAYS_INLINE void setNotFound() { _netconfFailure = NETCONF_FAILURE_NOT_FOUND; }
+	ZT_ALWAYS_INLINE void setNotFound() noexcept { _netconfFailure = NETCONF_FAILURE_NOT_FOUND; }
 
 
 	/**
 	/**
 	 * Determine whether this peer is permitted to communicate on this network
 	 * Determine whether this peer is permitted to communicate on this network
@@ -355,7 +354,7 @@ public:
 	/**
 	/**
 	 * @return Externally usable pointer-to-pointer exported via the core API
 	 * @return Externally usable pointer-to-pointer exported via the core API
 	 */
 	 */
-	ZT_ALWAYS_INLINE void **userPtr() { return &_uPtr; }
+	ZT_ALWAYS_INLINE void **userPtr() noexcept { return &_uPtr; }
 
 
 private:
 private:
 	void _requestConfiguration(void *tPtr);
 	void _requestConfiguration(void *tPtr);
@@ -403,7 +402,7 @@ private:
 	Mutex _config_l;
 	Mutex _config_l;
 	Mutex _memberships_l;
 	Mutex _memberships_l;
 
 
-	AtomicCounter<int> __refCount;
+	std::atomic<int> __refCount;
 };
 };
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier
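
controllerFor() and controller() above encode the rule that a ZeroTier network ID embeds its controller's address in the upper 40 bits, with the low 24 bits being the controller-local network number, so a right shift by 24 yields the controller. A worked example with a made-up network ID:

#include <cstdint>
#include "Address.hpp"
#include "Network.hpp"

static void controllerSketch()
{
	const uint64_t nwid = 0x1122334455000abcULL;                   // made-up network ID
	const ZeroTier::Address ctl(ZeroTier::Network::controllerFor(nwid));
	// ctl.toInt() == 0x1122334455; the controller-local network number is 0x000abc.
	(void)ctl;
}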

+ 9 - 2
node/NetworkConfig.cpp

@@ -32,6 +32,7 @@ bool NetworkConfig::toDictionary(Dictionary &d,bool includeLegacy) const
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,this->credentialTimeMaxDelta);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,this->credentialTimeMaxDelta);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_REVISION,this->revision);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_REVISION,this->revision);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,this->issuedTo.toString((char *)tmp));
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,this->issuedTo.toString((char *)tmp));
+		d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH,this->issuedToIdentityHash,ZT_IDENTITY_HASH_SIZE);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,this->flags);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,this->flags);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT,(uint64_t)this->multicastLimit);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT,(uint64_t)this->multicastLimit);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_TYPE,(uint16_t)this->type);
 		d.add(ZT_NETWORKCONFIG_DICT_KEY_TYPE,(uint16_t)this->type);
@@ -119,6 +120,12 @@ bool NetworkConfig::fromDictionary(const Dictionary &d)
 		this->credentialTimeMaxDelta = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,0);
 		this->credentialTimeMaxDelta = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,0);
 		this->revision = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_REVISION,0);
 		this->revision = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_REVISION,0);
 		this->issuedTo = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,0);
 		this->issuedTo = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,0);
+		const std::vector<uint8_t> *blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH]);
+		if (blob->size() == ZT_IDENTITY_HASH_SIZE) {
+			memcpy(this->issuedToIdentityHash,blob->data(),ZT_IDENTITY_HASH_SIZE);
+		} else {
+			memset(this->issuedToIdentityHash,0,ZT_IDENTITY_HASH_SIZE);
+		}
 		if (!this->issuedTo)
 		if (!this->issuedTo)
 			return false;
 			return false;
 		this->multicastLimit = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT,0);
 		this->multicastLimit = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT,0);
@@ -135,7 +142,7 @@ bool NetworkConfig::fromDictionary(const Dictionary &d)
 			this->flags = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,0);
 			this->flags = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,0);
 			this->type = (ZT_VirtualNetworkType)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TYPE,(uint64_t)ZT_NETWORK_TYPE_PRIVATE);
 			this->type = (ZT_VirtualNetworkType)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TYPE,(uint64_t)ZT_NETWORK_TYPE_PRIVATE);
 
 
-			const std::vector<uint8_t> *blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_COM]);
+			blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_COM]);
 			if (!blob->empty()) {
 			if (!blob->empty()) {
 				if (this->com.unmarshal(blob->data(),(int)(blob->size()) < 0))
 				if (this->com.unmarshal(blob->data(),(int)(blob->size())) < 0)
 				if (this->com.unmarshal(blob->data(),(int)(blob->size())) < 0)
 					return false;
@@ -248,7 +255,7 @@ bool NetworkConfig::fromDictionary(const Dictionary &d)
 	return false;
 	return false;
 }
 }
 
 
-bool NetworkConfig::addSpecialist(const Address &a,const uint64_t f)
+bool NetworkConfig::addSpecialist(const Address &a,const uint64_t f) noexcept
 {
 {
 	const uint64_t aint = a.toInt();
 	const uint64_t aint = a.toInt();
 	for(unsigned int i=0;i<specialistCount;++i) {
 	for(unsigned int i=0;i<specialistCount;++i) {

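A small sketch of the size-checked blob handling fromDictionary() uses above for the new issuedToIdentityHash field: copy only if the blob is exactly the expected length, otherwise zero the destination. The map-based dictionary and the "IDH" key below are stand-ins, not the real Dictionary API.

#include <cstdint>
#include <cstring>
#include <map>
#include <string>
#include <vector>

#define EXAMPLE_IDENTITY_HASH_SIZE 48 // stand-in for ZT_IDENTITY_HASH_SIZE

static bool copyHashBlob(const std::map< std::string,std::vector<uint8_t> > &dict,uint8_t out[EXAMPLE_IDENTITY_HASH_SIZE])
{
	std::map< std::string,std::vector<uint8_t> >::const_iterator e(dict.find("IDH"));
	if ((e != dict.end())&&(e->second.size() == EXAMPLE_IDENTITY_HASH_SIZE)) {
		memcpy(out,e->second.data(),EXAMPLE_IDENTITY_HASH_SIZE);
		return true;
	}
	memset(out,0,EXAMPLE_IDENTITY_HASH_SIZE); // absent or wrong size: zero it, as above
	return false;
}
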
+ 12 - 10
node/NetworkConfig.hpp

@@ -124,6 +124,8 @@ namespace ZeroTier {
 #define ZT_NETWORKCONFIG_DICT_KEY_REVISION "r"
 #define ZT_NETWORKCONFIG_DICT_KEY_REVISION "r"
 // address of member
 // address of member
 #define ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO "id"
 #define ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO "id"
+// full identity hash of member
+#define ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH "IDH"
 // flags(hex)
 // flags(hex)
 #define ZT_NETWORKCONFIG_DICT_KEY_FLAGS "f"
 #define ZT_NETWORKCONFIG_DICT_KEY_FLAGS "f"
 // integer(hex)
 // integer(hex)
@@ -161,7 +163,7 @@ namespace ZeroTier {
  */
  */
 struct NetworkConfig : TriviallyCopyable
 struct NetworkConfig : TriviallyCopyable
 {
 {
-	ZT_ALWAYS_INLINE NetworkConfig() { memoryZero(this); }
+	ZT_ALWAYS_INLINE NetworkConfig() noexcept { memoryZero(this); }
 
 
 	/**
 	/**
 	 * Write this network config to a dictionary for transport
 	 * Write this network config to a dictionary for transport
@@ -183,28 +185,28 @@ struct NetworkConfig : TriviallyCopyable
 	/**
 	/**
 	 * @return True if broadcast (ff:ff:ff:ff:ff:ff) address should work on this network
 	 * @return True if broadcast (ff:ff:ff:ff:ff:ff) address should work on this network
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool enableBroadcast() const { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST) != 0); }
+	ZT_ALWAYS_INLINE bool enableBroadcast() const noexcept { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST) != 0); }
 
 
 	/**
 	/**
 	 * @return True if IPv6 NDP emulation should be allowed for certain "magic" IPv6 address patterns
 	 * @return True if IPv6 NDP emulation should be allowed for certain "magic" IPv6 address patterns
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool ndpEmulation() const { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION) != 0); }
+	ZT_ALWAYS_INLINE bool ndpEmulation() const noexcept { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION) != 0); }
 
 
 	/**
 	/**
 	 * @return Network type is public (no access control)
 	 * @return Network type is public (no access control)
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool isPublic() const { return (this->type == ZT_NETWORK_TYPE_PUBLIC); }
+	ZT_ALWAYS_INLINE bool isPublic() const noexcept { return (this->type == ZT_NETWORK_TYPE_PUBLIC); }
 
 
 	/**
 	/**
 	 * @return Network type is private (certificate access control)
 	 * @return Network type is private (certificate access control)
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool isPrivate() const { return (this->type == ZT_NETWORK_TYPE_PRIVATE); }
+	ZT_ALWAYS_INLINE bool isPrivate() const noexcept { return (this->type == ZT_NETWORK_TYPE_PRIVATE); }
 
 
 	/**
 	/**
 	 * @param fromPeer Peer attempting to bridge other Ethernet peers onto network
 	 * @param fromPeer Peer attempting to bridge other Ethernet peers onto network
 	 * @return True if this network allows bridging
 	 * @return True if this network allows bridging
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool permitsBridging(const Address &fromPeer) const
+	ZT_ALWAYS_INLINE bool permitsBridging(const Address &fromPeer) const noexcept
 	{
 	{
 		for(unsigned int i=0;i<specialistCount;++i) {
 		for(unsigned int i=0;i<specialistCount;++i) {
 			if ((fromPeer == specialists[i])&&((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0))
 			if ((fromPeer == specialists[i])&&((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0))
@@ -213,9 +215,9 @@ struct NetworkConfig : TriviallyCopyable
 		return false;
 		return false;
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE operator bool() const { return (networkId != 0); }
-	ZT_ALWAYS_INLINE bool operator==(const NetworkConfig &nc) const { return (memcmp(this,&nc,sizeof(NetworkConfig)) == 0); }
-	ZT_ALWAYS_INLINE bool operator!=(const NetworkConfig &nc) const { return (!(*this == nc)); }
+	ZT_ALWAYS_INLINE operator bool() const noexcept { return (networkId != 0); }
+	ZT_ALWAYS_INLINE bool operator==(const NetworkConfig &nc) const noexcept { return (memcmp(this,&nc,sizeof(NetworkConfig)) == 0); }
+	ZT_ALWAYS_INLINE bool operator!=(const NetworkConfig &nc) const noexcept { return (!(*this == nc)); }
 
 
 	/**
 	/**
 	 * Add a specialist or mask flags if already present
 	 * Add a specialist or mask flags if already present
@@ -227,7 +229,7 @@ struct NetworkConfig : TriviallyCopyable
 	 * @param f Flags (OR of specialist role/type flags)
 	 * @param f Flags (OR of specialist role/type flags)
 	 * @return True if successfully masked or added
 	 * @return True if successfully masked or added
 	 */
 	 */
-	bool addSpecialist(const Address &a,uint64_t f);
+	bool addSpecialist(const Address &a,uint64_t f) noexcept;
 
 
 	ZT_ALWAYS_INLINE const Capability *capability(const uint32_t id) const
 	ZT_ALWAYS_INLINE const Capability *capability(const uint32_t id) const
 	{
 	{

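A sketch of the bit-mask tests behind enableBroadcast(), isPublic(), and permitsBridging() above. A specialist entry appears to carry the 40-bit member address in its low bits and role flags in its high bits, so both kinds of check reduce to AND/compare operations on 64-bit words; the mask and helper names below are illustrative, not the real ZT_NETWORKCONFIG_* constants.

#include <cstdint>

static const uint64_t kAddressMask = 0xffffffffffULL; // low 40 bits hold the member address

// Generic flag test, as in enableBroadcast()/ndpEmulation() above.
inline bool flagSet(const uint64_t flags,const uint64_t f) noexcept { return ((flags & f) != 0); }

// True if a specialist entry names this 40-bit address and carries the given role bit,
// which is the shape of the check inside permitsBridging().
inline bool specialistHasRole(const uint64_t specialist,const uint64_t address40,const uint64_t roleBit) noexcept
{
	return (((specialist & kAddressMask) == address40)&&((specialist & roleBit) != 0));
}
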
+ 46 - 16
node/Node.cpp

@@ -19,7 +19,6 @@
 #include "SharedPtr.hpp"
 #include "SharedPtr.hpp"
 #include "Node.hpp"
 #include "Node.hpp"
 #include "NetworkController.hpp"
 #include "NetworkController.hpp"
-#include "Switch.hpp"
 #include "Topology.hpp"
 #include "Topology.hpp"
 #include "Address.hpp"
 #include "Address.hpp"
 #include "Identity.hpp"
 #include "Identity.hpp"
@@ -44,6 +43,7 @@ Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64
 	_lastHousekeepingRun(0),
 	_lastHousekeepingRun(0),
 	_lastNetworkHousekeepingRun(0),
 	_lastNetworkHousekeepingRun(0),
 	_lastPathKeepaliveCheck(0),
 	_lastPathKeepaliveCheck(0),
+	_natMustDie(true),
 	_online(false)
 	_online(false)
 {
 {
 	_networks.resize(64); // _networksMask + 1, must be power of two
 	_networks.resize(64); // _networksMask + 1, must be power of two
@@ -78,6 +78,7 @@ Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64
 			stateObjectPut(tPtr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,RR->publicIdentityStr,(unsigned int)strlen(RR->publicIdentityStr));
 			stateObjectPut(tPtr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,RR->publicIdentityStr,(unsigned int)strlen(RR->publicIdentityStr));
 	}
 	}
 
 
+#if 0
 	char *m = nullptr;
 	char *m = nullptr;
 	try {
 	try {
 		m = reinterpret_cast<char *>(malloc(16 + sizeof(Trace) + sizeof(Switch) + sizeof(Topology) + sizeof(SelfAwareness)));
 		m = reinterpret_cast<char *>(malloc(16 + sizeof(Trace) + sizeof(Switch) + sizeof(Topology) + sizeof(SelfAwareness)));
@@ -100,27 +101,34 @@ Node::Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64
 		if (m) ::free(m);
 		if (m) ::free(m);
 		throw;
 		throw;
 	}
 	}
+#endif
 
 
 	postEvent(tPtr, ZT_EVENT_UP);
 	postEvent(tPtr, ZT_EVENT_UP);
 }
 }
 
 
 Node::~Node()
 Node::~Node()
 {
 {
+	// Let go of all networks to leave them. Do it this way in case Network wants to
+	// do anything in its destructor that takes the _networks lock, which would otherwise deadlock.
+	std::vector< SharedPtr<Network> > networks;
 	{
 	{
 		RWMutex::Lock _l(_networks_m);
 		RWMutex::Lock _l(_networks_m);
-		for(std::vector< SharedPtr<Network> >::iterator i(_networks.begin());i!=_networks.end();++i)
-			i->zero();
+		networks.swap(_networks);
 	}
 	}
+	networks.clear();
+	_networks_m.lock();
+	_networks_m.unlock();
 
 
 	if (RR->sa) RR->sa->~SelfAwareness();
 	if (RR->sa) RR->sa->~SelfAwareness();
 	if (RR->topology) RR->topology->~Topology();
 	if (RR->topology) RR->topology->~Topology();
-	if (RR->sw) RR->sw->~Switch();
 	if (RR->t) RR->t->~Trace();
 	if (RR->t) RR->t->~Trace();
 	free(RR->rtmem);
 	free(RR->rtmem);
 
 
-	// Let go of cached Buf objects. This does no harm if other nodes are running
-	// but usually that won't be the case.
-	freeBufPool();
+	// Let go of cached Buf objects. If other nodes happen to be running in this
+	// same process space new Bufs will be allocated as needed, but this is almost
+	// never the case. Calling this here saves RAM if we are running inside something
+	// that wants to keep running after tearing down its ZeroTier core instance.
+	Buf::freePool();
 }
 }
 
 
 void Node::shutdown(void *tPtr)
 void Node::shutdown(void *tPtr)
@@ -138,7 +146,7 @@ ZT_ResultCode Node::processWirePacket(
 	volatile int64_t *nextBackgroundTaskDeadline)
 	volatile int64_t *nextBackgroundTaskDeadline)
 {
 {
 	_now = now;
 	_now = now;
-	RR->sw->onRemotePacket(tptr,localSocket,*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
+	//RR->sw->onRemotePacket(tptr,localSocket,*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
 	return ZT_RESULT_OK;
 	return ZT_RESULT_OK;
 }
 }
 
 
@@ -157,7 +165,7 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
 	_now = now;
 	_now = now;
 	SharedPtr<Network> nw(this->network(nwid));
 	SharedPtr<Network> nw(this->network(nwid));
 	if (nw) {
 	if (nw) {
-		RR->sw->onLocalEthernet(tptr,nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
+		//RR->sw->onLocalEthernet(tptr,nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
 		return ZT_RESULT_OK;
 		return ZT_RESULT_OK;
 	} else {
 	} else {
 		return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
 		return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
@@ -184,7 +192,7 @@ struct _processBackgroundTasks_ping_eachPeer
 	}
 	}
 };
 };
 
 
-static uint8_t junk = 0; // junk payload for keepalive packets
+static uint8_t keepAlivePayload = 0; // junk payload for keepalive packets
 struct _processBackgroundTasks_path_keepalive
 struct _processBackgroundTasks_path_keepalive
 {
 {
 	int64_t now;
 	int64_t now;
@@ -193,9 +201,8 @@ struct _processBackgroundTasks_path_keepalive
 	ZT_ALWAYS_INLINE void operator()(const SharedPtr<Path> &path)
 	ZT_ALWAYS_INLINE void operator()(const SharedPtr<Path> &path)
 	{
 	{
 		if ((now - path->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
 		if ((now - path->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
-			++junk;
-			path->send(RR,tPtr,&junk,sizeof(junk),now);
-			path->sent(now);
+			++keepAlivePayload;
+			path->send(RR,tPtr,&keepAlivePayload,1,now);
 		}
 		}
 	}
 	}
 };
 };
@@ -227,8 +234,9 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
 				// This will give us updated locators for these roots which may contain new
 				// This will give us updated locators for these roots which may contain new
 				// IP addresses. It will also auto-discover IPs for roots that were not added
 				// IP addresses. It will also auto-discover IPs for roots that were not added
 				// with an initial bootstrap address.
 				// with an initial bootstrap address.
-				for (std::vector<Address>::const_iterator r(pf.rootsNotOnline.begin()); r != pf.rootsNotOnline.end(); ++r)
-					RR->sw->requestWhois(tPtr,now,*r);
+				// TODO
+				//for (std::vector<Address>::const_iterator r(pf.rootsNotOnline.begin()); r != pf.rootsNotOnline.end(); ++r)
+				//	RR->sw->requestWhois(tPtr,now,*r);
 			}
 			}
 		} catch ( ... ) {
 		} catch ( ... ) {
 			return ZT_RESULT_FATAL_ERROR_INTERNAL;
 			return ZT_RESULT_FATAL_ERROR_INTERNAL;
@@ -281,8 +289,30 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
 		RR->topology->eachPath<_processBackgroundTasks_path_keepalive &>(pf);
 		RR->topology->eachPath<_processBackgroundTasks_path_keepalive &>(pf);
 	}
 	}
 
 
+	int64_t earliestAlarmAt = 0x7fffffffffffffffLL;
+	std::vector<Address> bzzt;
+	{
+		RWMutex::RMaybeWLock l(_peerAlarms_l);
+		for(std::map<Address,int64_t>::iterator a(_peerAlarms.begin());a!=_peerAlarms.end();) {
+			if (now >= a->second) {
+				bzzt.push_back(a->first);
+				l.write(); // acquire write lock if not already in write mode
+				_peerAlarms.erase(a++);
+			} else {
+				if (a->second < earliestAlarmAt)
+					earliestAlarmAt = a->second;
+				++a;
+			}
+		}
+	}
+	for(std::vector<Address>::iterator a(bzzt.begin());a!=bzzt.end();++a) {
+		const SharedPtr<Peer> p(RR->topology->peer(tPtr,*a,false));
+		if (p)
+			p->alarm(tPtr,now);
+	}
+
 	try {
 	try {
-		*nextBackgroundTaskDeadline = now + (int64_t)std::max(std::min((unsigned long)ZT_MAX_TIMER_TASK_INTERVAL,RR->sw->doTimerTasks(tPtr, now)), (unsigned long)ZT_MIN_TIMER_TASK_INTERVAL);
+		*nextBackgroundTaskDeadline = std::min(earliestAlarmAt,now + ZT_MAX_TIMER_TASK_INTERVAL);
 	} catch ( ... ) {
 	} catch ( ... ) {
 		return ZT_RESULT_FATAL_ERROR_INTERNAL;
 		return ZT_RESULT_FATAL_ERROR_INTERNAL;
 	}
 	}

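A condensed sketch of the alarm-draining pattern in processBackgroundTasks() above: due entries are collected and erased while the map is locked, then each peer's alarm() is invoked after the lock is released so alarm() can safely take other locks. The FakeAddress typedef and the function name are illustrative.

#include <cstdint>
#include <map>
#include <vector>

typedef uint64_t FakeAddress; // stand-in for ZeroTier::Address

// Collects the addresses whose alarms are due and returns the earliest remaining trigger time.
static int64_t drainDueAlarms(std::map<FakeAddress,int64_t> &alarms,const int64_t now,std::vector<FakeAddress> &due)
{
	int64_t earliest = 0x7fffffffffffffffLL;
	for(std::map<FakeAddress,int64_t>::iterator a(alarms.begin());a!=alarms.end();) {
		if (now >= a->second) {
			due.push_back(a->first);
			alarms.erase(a++); // post-increment keeps the iterator valid across erase()
		} else {
			if (a->second < earliest)
				earliest = a->second;
			++a;
		}
	}
	return earliest;
}
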
+ 47 - 15
node/Node.hpp

@@ -14,12 +14,6 @@
 #ifndef ZT_NODE_HPP
 #ifndef ZT_NODE_HPP
 #define ZT_NODE_HPP
 #define ZT_NODE_HPP
 
 
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-
-#include <vector>
-
 #include "Constants.hpp"
 #include "Constants.hpp"
 #include "RuntimeEnvironment.hpp"
 #include "RuntimeEnvironment.hpp"
 #include "InetAddress.hpp"
 #include "InetAddress.hpp"
@@ -31,6 +25,12 @@
 #include "NetworkController.hpp"
 #include "NetworkController.hpp"
 #include "Hashtable.hpp"
 #include "Hashtable.hpp"
 
 
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <vector>
+#include <map>
+
 // Bit mask for "expecting reply" hash
 // Bit mask for "expecting reply" hash
 #define ZT_EXPECTING_REPLIES_BUCKET_MASK1 255
 #define ZT_EXPECTING_REPLIES_BUCKET_MASK1 255
 #define ZT_EXPECTING_REPLIES_BUCKET_MASK2 31
 #define ZT_EXPECTING_REPLIES_BUCKET_MASK2 31
@@ -327,6 +327,11 @@ public:
 		return false;
 		return false;
 	}
 	}
 
 
+	/**
+	 * @return True if aggressive NAT-traversal mechanisms like scanning of <1024 ports are enabled
+	 */
+	ZT_ALWAYS_INLINE bool natMustDie() const { return _natMustDie; }
+
 	/**
 	/**
 	 * Check whether we should do potentially expensive identity verification (rate limit)
 	 * Check whether we should do potentially expensive identity verification (rate limit)
 	 *
 	 *
@@ -344,6 +349,20 @@ public:
 		return false;
 		return false;
 	}
 	}
 
 
+	/**
+	 * Wake any peers with the given address by calling their alarm() methods at or after the specified time
+	 *
+	 * @param peerAddress Peer address
+	 * @param triggerTime Time alarm should go off
+	 */
+	ZT_ALWAYS_INLINE void setPeerAlarm(const Address &peerAddress,const int64_t triggerTime)
+	{
+		RWMutex::Lock l(_peerAlarms_l);
+		int64_t &t = _peerAlarms[peerAddress];
+		if ((t <= 0)||(t > triggerTime))
+			t = triggerTime;
+	}
+
 	/**
 	/**
 	 * Check whether a local controller has authorized a member on a network
 	 * Check whether a local controller has authorized a member on a network
 	 *
 	 *
@@ -370,18 +389,27 @@ private:
 	ZT_Node_Callbacks _cb;
 	ZT_Node_Callbacks _cb;
 	void *_uPtr; // _uptr (lower case) is reserved in Visual Studio :P
 	void *_uPtr; // _uptr (lower case) is reserved in Visual Studio :P
 
 
-	// For tracking packet IDs to filter out OK/ERROR replies to packets we did not send
+	// For tracking packet IDs to filter out OK/ERROR replies to packets we did not send.
 	volatile uint8_t _expectingRepliesToBucketPtr[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1];
 	volatile uint8_t _expectingRepliesToBucketPtr[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1];
 	volatile uint32_t _expectingRepliesTo[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1][ZT_EXPECTING_REPLIES_BUCKET_MASK2 + 1];
 	volatile uint32_t _expectingRepliesTo[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1][ZT_EXPECTING_REPLIES_BUCKET_MASK2 + 1];
 
 
 	// Time of last identity verification indexed by InetAddress.rateGateHash() -- used in IncomingPacket::_doHELLO() via rateGateIdentityVerification()
 	// Time of last identity verification indexed by InetAddress.rateGateHash() -- used in IncomingPacket::_doHELLO() via rateGateIdentityVerification()
 	volatile int64_t _lastIdentityVerification[16384];
 	volatile int64_t _lastIdentityVerification[16384];
 
 
-	/* Map that remembers if we have recently sent a network config to someone
-	 * querying us as a controller. This is an optimization to allow network
-	 * controllers to know whether to treat things like multicast queries the
-	 * way authorized members would be treated without requiring an extra cert
-	 * validation. */
+	// Addresses of peers that want to have their alarm() function called at some point in the future.
+	// These behave like weak references in that the node looks them up in Topology and calls alarm()
+	// in each peer if that peer object is still held in memory. Calling alarm() unnecessarily on a peer
+	// is harmless. This just exists as an optimization to prevent having to iterate through all peers
+	// on every processBackgroundTasks call. A simple map<> is used here because there are usually only
+	// a few of these, if any, and it's slightly faster and lower memory in that case than a Hashtable.
+	std::map<Address,int64_t> _peerAlarms;
+	RWMutex _peerAlarms_l;
+
+	// Map that remembers if we have recently sent a network config to someone
+	// querying us as a controller. This is an optimization to allow network
+	// controllers to know whether to treat things like multicast queries the
+	// way authorized members would be treated without requiring an extra cert
+	// validation.
 	struct _LocalControllerAuth
 	struct _LocalControllerAuth
 	{
 	{
 		uint64_t nwid,address;
 		uint64_t nwid,address;
@@ -391,17 +419,20 @@ private:
 		ZT_ALWAYS_INLINE bool operator!=(const _LocalControllerAuth &a) const { return ((a.nwid != nwid)||(a.address != address)); }
 		ZT_ALWAYS_INLINE bool operator!=(const _LocalControllerAuth &a) const { return ((a.nwid != nwid)||(a.address != address)); }
 	};
 	};
 	Hashtable< _LocalControllerAuth,int64_t > _localControllerAuthorizations;
 	Hashtable< _LocalControllerAuth,int64_t > _localControllerAuthorizations;
+	Mutex _localControllerAuthorizations_m;
 
 
 	// Networks are stored in a flat hash table that is resized on any network ID collision. This makes
 	// Networks are stored in a flat hash table that is resized on any network ID collision. This makes
 	// network lookup by network ID a few bitwise ops and an array index.
 	// network lookup by network ID a few bitwise ops and an array index.
 	std::vector< SharedPtr<Network> > _networks;
 	std::vector< SharedPtr<Network> > _networks;
 	uint64_t _networksMask;
 	uint64_t _networksMask;
+	RWMutex _networks_m;
 
 
+	// These are local interface addresses that have been configured via the API
+	// and can be pushed to other nodes.
 	std::vector< ZT_InterfaceAddress > _localInterfaceAddresses;
 	std::vector< ZT_InterfaceAddress > _localInterfaceAddresses;
-
-	Mutex _localControllerAuthorizations_m;
-	RWMutex _networks_m;
 	Mutex _localInterfaceAddresses_m;
 	Mutex _localInterfaceAddresses_m;
+
+	// This is locked while running processBackgroundTasks to ensure that calls to it are not concurrent.
 	Mutex _backgroundTasksLock;
 	Mutex _backgroundTasksLock;
 
 
 	volatile int64_t _now;
 	volatile int64_t _now;
@@ -409,6 +440,7 @@ private:
 	volatile int64_t _lastHousekeepingRun;
 	volatile int64_t _lastHousekeepingRun;
 	volatile int64_t _lastNetworkHousekeepingRun;
 	volatile int64_t _lastNetworkHousekeepingRun;
 	volatile int64_t _lastPathKeepaliveCheck;
 	volatile int64_t _lastPathKeepaliveCheck;
+	volatile bool _natMustDie;
 	volatile bool _online;
 	volatile bool _online;
 };
 };
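
A sketch of the flat, power-of-two network table lookup described by the comment above: the slot is the (lightly mixed) network ID masked with _networksMask, which is a few bitwise ops plus one array index. The mixing step is illustrative, not the hash the real code uses.

#include <cstdint>

inline uint64_t networkSlot(uint64_t nwid,const uint64_t tableMask) noexcept
{
	nwid ^= (nwid >> 32U); // fold high bits down so nearby IDs spread out (illustrative mix)
	return (nwid & tableMask); // tableMask == tableSize - 1, where tableSize is a power of two
}

// usage sketch: SharedPtr<Network> &slot = _networks[networkSlot(nwid,_networksMask)];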
 
 

+ 3 - 0
node/OS.hpp

@@ -138,13 +138,16 @@
 #endif
 #endif
 
 
 #if __cplusplus > 199711L
 #if __cplusplus > 199711L
+#include <atomic>
 #ifndef __CPP11__
 #ifndef __CPP11__
 #define __CPP11__
 #define __CPP11__
 #endif
 #endif
 #endif
 #endif
 #ifndef __CPP11__
 #ifndef __CPP11__
+/* TODO: will need some kind of very basic atomic<> implementation if we want to compile on pre-c++11 compilers */
 #define nullptr (0)
 #define nullptr (0)
 #define constexpr ZT_ALWAYS_INLINE
 #define constexpr ZT_ALWAYS_INLINE
+#define noexcept throw()
 #endif
 #endif
 
 
 #ifdef SOCKET
 #ifdef SOCKET

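A rough idea of the pre-C++11 shim the TODO above refers to, assuming GCC/Clang __sync builtins are available. This is only a sketch of the handful of operations the tree needs (increment, decrement, fetch_sub, load), not a drop-in std::atomic replacement.

template<typename T>
class AtomicIntShim
{
public:
	explicit AtomicIntShim(const T v) : _v(v) {}
	T operator++() { return __sync_add_and_fetch(&_v,(T)1); }      // returns the new value
	T operator--() { return __sync_sub_and_fetch(&_v,(T)1); }      // returns the new value
	T fetch_sub(const T d) { return __sync_fetch_and_sub(&_v,d); } // returns the previous value
	T load() const { return __sync_add_and_fetch(const_cast<volatile T *>(&_v),(T)0); }
private:
	volatile T _v;
};
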
+ 2 - 2
node/Path.cpp

@@ -17,7 +17,7 @@
 
 
 namespace ZeroTier {
 namespace ZeroTier {
 
 
-bool Path::send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now)
+bool Path::send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now) noexcept
 {
 {
 	if (RR->node->putPacket(tPtr,_localSocket,_addr,data,len)) {
 	if (RR->node->putPacket(tPtr,_localSocket,_addr,data,len)) {
 		_lastOut = now;
 		_lastOut = now;
@@ -26,7 +26,7 @@ bool Path::send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigne
 	return false;
 	return false;
 }
 }
 
 
-bool Path::isAddressValidForPath(const InetAddress &a)
+bool Path::isAddressValidForPath(const InetAddress &a) noexcept
 {
 {
 	if ((a.ss_family == AF_INET)||(a.ss_family == AF_INET6)) {
 	if ((a.ss_family == AF_INET)||(a.ss_family == AF_INET6)) {
 		switch(a.ipScope()) {
 		switch(a.ipScope()) {

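A small sketch tying together the timestamps Path maintains (send() updates _lastOut on success, received() updates _lastIn) and the single-byte keepalive decision made by the functor in Node.cpp above. The period and timeout constants are placeholders for ZT_PATH_KEEPALIVE_PERIOD and ZT_PATH_ALIVE_TIMEOUT.

#include <cstdint>

static const int64_t kExampleKeepalivePeriod = 20000; // placeholder, milliseconds
static const int64_t kExampleAliveTimeout = 45000;    // placeholder, milliseconds

struct PathTimes
{
	int64_t lastIn;  // last receive on this path
	int64_t lastOut; // last send on this path
};

// True if nothing has gone out recently enough to keep NAT/firewall state warm.
inline bool needsKeepalive(const PathTimes &p,const int64_t now) noexcept
{
	return ((now - p.lastOut) >= kExampleKeepalivePeriod);
}

// True if we have heard from the far side recently (the shape of Path::alive() above).
inline bool pathAlive(const PathTimes &p,const int64_t now) noexcept
{
	return ((now - p.lastIn) < kExampleAliveTimeout);
}
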
+ 12 - 13
node/Path.hpp

@@ -24,7 +24,6 @@
 #include "Constants.hpp"
 #include "Constants.hpp"
 #include "InetAddress.hpp"
 #include "InetAddress.hpp"
 #include "SharedPtr.hpp"
 #include "SharedPtr.hpp"
-#include "AtomicCounter.hpp"
 #include "Utils.hpp"
 #include "Utils.hpp"
 #include "Mutex.hpp"
 #include "Mutex.hpp"
 
 
@@ -47,7 +46,7 @@ class Path
 	friend class Defragmenter;
 	friend class Defragmenter;
 
 
 public:
 public:
-	ZT_ALWAYS_INLINE Path(const int64_t l,const InetAddress &r) :
+	ZT_ALWAYS_INLINE Path(const int64_t l,const InetAddress &r) noexcept :
 		_localSocket(l),
 		_localSocket(l),
 		_lastIn(0),
 		_lastIn(0),
 		_lastOut(0),
 		_lastOut(0),
@@ -65,55 +64,55 @@ public:
 	 * @param now Current time
 	 * @param now Current time
 	 * @return True if transport reported success
 	 * @return True if transport reported success
 	 */
 	 */
-	bool send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now);
+	bool send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now) noexcept;
 
 
 	/**
 	/**
 	 * Explicitly update last sent time
 	 * Explicitly update last sent time
 	 *
 	 *
 	 * @param t Time of send
 	 * @param t Time of send
 	 */
 	 */
-	ZT_ALWAYS_INLINE void sent(const int64_t t) { _lastOut = t; }
+	ZT_ALWAYS_INLINE void sent(const int64_t t) noexcept { _lastOut = t; }
 
 
 	/**
 	/**
 	 * Called when a packet is received from this remote path, regardless of content
 	 * Called when a packet is received from this remote path, regardless of content
 	 *
 	 *
 	 * @param t Time of receive
 	 * @param t Time of receive
 	 */
 	 */
-	ZT_ALWAYS_INLINE void received(const int64_t t) { _lastIn = t; }
+	ZT_ALWAYS_INLINE void received(const int64_t t) noexcept { _lastIn = t; }
 
 
 	/**
 	/**
 	 * Check path aliveness
 	 * Check path aliveness
 	 *
 	 *
 	 * @param now Current time
 	 * @param now Current time
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool alive(const int64_t now) const { return ((now - _lastIn) < ZT_PATH_ALIVE_TIMEOUT); }
+	ZT_ALWAYS_INLINE bool alive(const int64_t now) const noexcept { return ((now - _lastIn) < ZT_PATH_ALIVE_TIMEOUT); }
 
 
 	/**
 	/**
 	 * Check if path is considered active
 	 * Check if path is considered active
 	 *
 	 *
 	 * @param now Current time
 	 * @param now Current time
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool active(const int64_t now) const { return ((now - _lastIn) < ZT_PATH_ACTIVITY_TIMEOUT); }
+	ZT_ALWAYS_INLINE bool active(const int64_t now) const noexcept { return ((now - _lastIn) < ZT_PATH_ACTIVITY_TIMEOUT); }
 
 
 	/**
 	/**
 	 * @return Physical address
 	 * @return Physical address
 	 */
 	 */
-	ZT_ALWAYS_INLINE const InetAddress &address() const { return _addr; }
+	ZT_ALWAYS_INLINE const InetAddress &address() const noexcept { return _addr; }
 
 
 	/**
 	/**
 	 * @return Local socket as specified by external code
 	 * @return Local socket as specified by external code
 	 */
 	 */
-	ZT_ALWAYS_INLINE int64_t localSocket() const { return _localSocket; }
+	ZT_ALWAYS_INLINE int64_t localSocket() const noexcept { return _localSocket; }
 
 
 	/**
 	/**
 	 * @return Last time we received anything
 	 * @return Last time we received anything
 	 */
 	 */
-	ZT_ALWAYS_INLINE int64_t lastIn() const { return _lastIn; }
+	ZT_ALWAYS_INLINE int64_t lastIn() const noexcept { return _lastIn; }
 
 
 	/**
 	/**
 	 * @return Last time we sent something
 	 * @return Last time we sent something
 	 */
 	 */
-	ZT_ALWAYS_INLINE int64_t lastOut() const { return _lastOut; }
+	ZT_ALWAYS_INLINE int64_t lastOut() const noexcept { return _lastOut; }
 
 
 	/**
 	/**
 	 * Check whether this address is valid for a ZeroTier path
 	 * Check whether this address is valid for a ZeroTier path
@@ -124,7 +123,7 @@ public:
 	 * @param a Address to check
 	 * @param a Address to check
 	 * @return True if address is good for ZeroTier path use
 	 * @return True if address is good for ZeroTier path use
 	 */
 	 */
-	static bool isAddressValidForPath(const InetAddress &a);
+	static bool isAddressValidForPath(const InetAddress &a) noexcept;
 
 
 private:
 private:
 	int64_t _localSocket;
 	int64_t _localSocket;
@@ -138,7 +137,7 @@ private:
 	std::set<uint64_t> _inboundFragmentedMessages;
 	std::set<uint64_t> _inboundFragmentedMessages;
 	Mutex _inboundFragmentedMessages_l;
 	Mutex _inboundFragmentedMessages_l;
 
 
-	AtomicCounter<int> __refCount;
+	std::atomic<int> __refCount;
 };
 };
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier

+ 201 - 79
node/Peer.cpp

@@ -40,9 +40,11 @@ Peer::Peer(const RuntimeEnvironment *renv) :
 	_lastWhoisRequestReceived(0),
 	_lastWhoisRequestReceived(0),
 	_lastEchoRequestReceived(0),
 	_lastEchoRequestReceived(0),
 	_lastPushDirectPathsReceived(0),
 	_lastPushDirectPathsReceived(0),
+	_lastProbeReceived(0),
 	_lastAttemptedP2PInit(0),
 	_lastAttemptedP2PInit(0),
 	_lastTriedStaticPath(0),
 	_lastTriedStaticPath(0),
 	_lastPrioritizedPaths(0),
 	_lastPrioritizedPaths(0),
+	_lastAttemptedAggressiveNATTraversal(0),
 	_latency(0xffff),
 	_latency(0xffff),
 	_alivePathCount(0),
 	_alivePathCount(0),
 	_vProto(0),
 	_vProto(0),
@@ -52,12 +54,16 @@ Peer::Peer(const RuntimeEnvironment *renv) :
 {
 {
 }
 }
 
 
-bool Peer::init(const Identity &myIdentity,const Identity &peerIdentity)
+bool Peer::init(const Identity &peerIdentity)
 {
 {
+	RWMutex::Lock l(_lock);
 	if (_id == peerIdentity)
 	if (_id == peerIdentity)
 		return true;
 		return true;
 	_id = peerIdentity;
 	_id = peerIdentity;
-	return myIdentity.agree(peerIdentity,_key);
+	if (!RR->identity.agree(peerIdentity,_key))
+		return false;
+	_incomingProbe = Protocol::createProbe(_id,RR->identity,_key);
+	return true;
 }
 }
 
 
 void Peer::received(
 void Peer::received(
@@ -66,10 +72,7 @@ void Peer::received(
 	const unsigned int hops,
 	const unsigned int hops,
 	const uint64_t packetId,
 	const uint64_t packetId,
 	const unsigned int payloadLength,
 	const unsigned int payloadLength,
-	const Protocol::Verb verb,
-	const uint64_t inRePacketId,
-	const Protocol::Verb inReVerb,
-	const uint64_t networkId)
+	const Protocol::Verb verb)
 {
 {
 	const int64_t now = RR->node->now();
 	const int64_t now = RR->node->now();
 	_lastReceive = now;
 	_lastReceive = now;
@@ -204,25 +207,6 @@ path_check_done:
 	}
 	}
 }
 }
 
 
-bool Peer::shouldTryPath(void *tPtr,int64_t now,const SharedPtr<Peer> &suggestedBy,const InetAddress &addr) const
-{
-	int maxHaveScope = -1;
-	{
-		RWMutex::RLock l(_lock);
-		for (unsigned int i = 0; i < _alivePathCount; ++i) {
-			if (_paths[i]) {
-				if (_paths[i]->address().ipsEqual2(addr))
-					return false;
-
-				int s = (int)_paths[i]->address().ipScope();
-				if (s > maxHaveScope)
-					maxHaveScope = s;
-			}
-		}
-	}
-	return ( ((int)addr.ipScope() > maxHaveScope) && RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,addr) );
-}
-
 void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
 void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
 {
 {
 #if 0
 #if 0
@@ -247,6 +231,19 @@ void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atA
 #endif
 #endif
 }
 }
 
 
+void Peer::sendNOP(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
+{
+	Buf outp;
+	Protocol::Header &ph = outp.as<Protocol::Header>();
+	ph.packetId = Protocol::getPacketId();
+	_id.address().copyTo(ph.destination);
+	RR->identity.address().copyTo(ph.source);
+	ph.flags = 0;
+	ph.verb = Protocol::VERB_NOP;
+	Protocol::armor(outp,sizeof(Protocol::Header),_key,ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
+	RR->node->putPacket(tPtr,localSocket,atAddress,outp.b,sizeof(Protocol::Header));
+}
+
 void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
 void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
 {
 {
 	RWMutex::RLock l(_lock);
 	RWMutex::RLock l(_lock);
@@ -289,7 +286,7 @@ void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddres
 	}
 	}
 }
 }
 
 
-void Peer::updateLatency(const unsigned int l)
+void Peer::updateLatency(const unsigned int l) noexcept
 {
 {
 	if ((l > 0)&&(l < 0xffff)) {
 	if ((l > 0)&&(l < 0xffff)) {
 		unsigned int lat = _latency;
 		unsigned int lat = _latency;
@@ -301,45 +298,33 @@ void Peer::updateLatency(const unsigned int l)
 	}
 	}
 }
 }
 
 
-bool Peer::sendDirect(void *tPtr,const void *data,const unsigned int len,const int64_t now)
+SharedPtr<Path> Peer::path(const int64_t now)
 {
 {
 	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
 	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
 		_lastPrioritizedPaths = now;
 		_lastPrioritizedPaths = now;
-		_lock.lock();
+		RWMutex::Lock l(_lock);
 		_prioritizePaths(now);
 		_prioritizePaths(now);
-		if (_alivePathCount == 0) {
-			_lock.unlock();
-			return false;
-		}
-		const bool r = _paths[0]->send(RR,tPtr,data,len,now);
-		_lock.unlock();
-		return r;
+		if (_alivePathCount == 0)
+			return SharedPtr<Path>();
+		return _paths[0];
 	} else {
 	} else {
-		_lock.rlock();
-		if (_alivePathCount == 0) {
-			_lock.runlock();
-			return false;
-		}
-		const bool r = _paths[0]->send(RR,tPtr,data,len,now);
-		_lock.runlock();
-		return r;
+		RWMutex::RLock l(_lock);
+		if (_alivePathCount == 0)
+			return SharedPtr<Path>();
+		return _paths[0];
 	}
 	}
 }
 }
 
 
-SharedPtr<Path> Peer::path(const int64_t now)
+bool Peer::direct(const int64_t now)
 {
 {
 	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
 	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
 		_lastPrioritizedPaths = now;
 		_lastPrioritizedPaths = now;
 		RWMutex::Lock l(_lock);
 		RWMutex::Lock l(_lock);
 		_prioritizePaths(now);
 		_prioritizePaths(now);
-		if (_alivePathCount == 0)
-			return SharedPtr<Path>();
-		return _paths[0];
+		return (_alivePathCount > 0);
 	} else {
 	} else {
 		RWMutex::RLock l(_lock);
 		RWMutex::RLock l(_lock);
-		if (_alivePathCount == 0)
-			return SharedPtr<Path>();
-		return _paths[0];
+		return (_alivePathCount > 0);
 	}
 	}
 }
 }
 
 
@@ -369,7 +354,136 @@ void Peer::save(void *tPtr) const
 	free(buf);
 	free(buf);
 }
 }
 
 
-int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const
+void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,bool behindSymmetric,bool bfg1024)
+{
+	static uint8_t junk = 0;
+
+	InetAddress phyAddr(ep.inetAddr());
+	if (phyAddr) { // only this endpoint type is currently implemented
+		if (!RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,-1,phyAddr))
+			return;
+
+		// Sending a packet with a low TTL before the real message assists traversal with some
+		// stateful firewalls and is harmless otherwise AFAIK.
+		++junk;
+		RR->node->putPacket(tPtr,-1,phyAddr,&junk,1,2);
+
+		// In a few hundred milliseconds we'll send the real packet.
+		{
+			RWMutex::Lock l(_lock);
+			_contactQueue.push_back(_ContactQueueItem(phyAddr,ZT_MAX_PEER_NETWORK_PATHS));
+		}
+
+		// If the peer indicates that they may be behind a symmetric NAT and there are no
+		// living direct paths, try a few more aggressive things.
+		if ((behindSymmetric) && (phyAddr.ss_family == AF_INET) && (!direct(now))) {
+			unsigned int port = phyAddr.port();
+			if ((bfg1024)&&(port < 1024)&&(RR->node->natMustDie())) {
+				// If the other side is using a low-numbered port and has elected to
+				// have this done, we can try scanning every port below 1024. The search
+				// space here is small enough that we have a very good chance of punching.
+
+				// Generate a random order list of all <1024 ports except 0 and the original sending port.
+				uint16_t ports[1022];
+				uint16_t ctr = 1;
+				for (int i=0;i<1022;++i) {
+					if (ctr == port) ++ctr;
+					ports[i] = ctr++;
+				}
+				for (int i=0;i<512;++i) {
+					uint64_t rn = Utils::random();
+					unsigned int a = ((unsigned int)rn) % 1022;
+					unsigned int b = ((unsigned int)(rn >> 24U)) % 1022;
+					if (a != b) {
+						uint16_t tmp = ports[a];
+						ports[a] = ports[b];
+						ports[b] = tmp;
+					}
+				}
+
+				// Chunk ports into chunks of 128 to try in few hundred millisecond intervals,
+				// abandoning attempts once there is at least one direct path.
+				{
+					RWMutex::Lock l(_lock);
+					for (int i=0;i<896;i+=128)
+						_contactQueue.push_back(_ContactQueueItem(phyAddr,ports + i,ports + i + 128,1));
+					_contactQueue.push_back(_ContactQueueItem(phyAddr,ports + 896,ports + 1022,1));
+				}
+			} else {
+				// Otherwise use the simpler sequential port attempt method in intervals.
+				RWMutex::Lock l(_lock);
+				for (int k=0;k<3;++k) {
+					if (++port > 65535) break;
+					InetAddress tryNext(phyAddr);
+					tryNext.setPort(port);
+					_contactQueue.push_back(_ContactQueueItem(tryNext,1));
+				}
+			}
+		}
+
+		// Start alarms going off to actually send these...
+		RR->node->setPeerAlarm(_id.address(),now + ZT_NAT_TRAVERSAL_INTERVAL);
+	}
+}
+
+void Peer::alarm(void *tPtr,const int64_t now)
+{
+	// Pop one contact queue item and also clean the queue of any that are no
+	// longer applicable because the alive path count has exceeded their threshold.
+	bool stillHaveContactQueueItems;
+	_ContactQueueItem qi;
+	{
+		RWMutex::Lock l(_lock);
+
+		if (_contactQueue.empty())
+			return;
+		while (_alivePathCount >= _contactQueue.front().alivePathThreshold) {
+			_contactQueue.pop_front();
+			if (_contactQueue.empty())
+				return;
+		}
+
+		_ContactQueueItem &qi2 = _contactQueue.front();
+		qi.address = qi2.address;
+		qi.ports.swap(qi2.ports);
+		qi.alivePathThreshold = qi2.alivePathThreshold;
+		_contactQueue.pop_front();
+
+		for(std::list<_ContactQueueItem>::iterator q(_contactQueue.begin());q!=_contactQueue.end();) {
+			if (_alivePathCount >= q->alivePathThreshold)
+				_contactQueue.erase(q++);
+			else ++q;
+		}
+
+		stillHaveContactQueueItems = !_contactQueue.empty();
+	}
+
+	if (_vProto >= 11) {
+		uint64_t outgoingProbe = Protocol::createProbe(RR->identity,_id,_key);
+		if (qi.ports.empty()) {
+			RR->node->putPacket(tPtr,-1,qi.address,&outgoingProbe,ZT_PROTO_PROBE_LENGTH);
+		} else {
+			for (std::vector<uint16_t>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) {
+				qi.address.setPort(*p);
+				RR->node->putPacket(tPtr,-1,qi.address,&outgoingProbe,ZT_PROTO_PROBE_LENGTH);
+			}
+		}
+	} else {
+		if (qi.ports.empty()) {
+			this->sendNOP(tPtr,-1,qi.address,now);
+		} else {
+			for (std::vector<uint16_t>::iterator p(qi.ports.begin()); p != qi.ports.end(); ++p) {
+				qi.address.setPort(*p);
+				this->sendNOP(tPtr,-1,qi.address,now);
+			}
+		}
+	}
+
+	if (stillHaveContactQueueItems)
+		RR->node->setPeerAlarm(_id.address(),now + ZT_NAT_TRAVERSAL_INTERVAL);
+}
+
+int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
 {
 {
 	RWMutex::RLock l(_lock);
 	RWMutex::RLock l(_lock);
 
 
@@ -403,39 +517,47 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const
 	return p;
 	return p;
 }
 }
 
 
-int Peer::unmarshal(const uint8_t *restrict data,const int len)
+int Peer::unmarshal(const uint8_t *restrict data,const int len) noexcept
 {
 {
-	RWMutex::Lock l(_lock);
+	int p;
 
 
-	if ((len <= 1)||(data[0] != 0))
-		return -1;
+	{
+		RWMutex::Lock l(_lock);
 
 
-	int s = _id.unmarshal(data + 1,len - 1);
-	if (s <= 0)
-		return s;
-	int p = 1 + s;
-	s = _locator.unmarshal(data + p,len - p);
-	if (s <= 0)
-		return s;
-	p += s;
-	s = _bootstrap.unmarshal(data + p,len - p);
-	if (s <= 0)
-		return s;
-	p += s;
+		if ((len <= 1) || (data[0] != 0))
+			return -1;
+
+		int s = _id.unmarshal(data + 1,len - 1);
+		if (s <= 0)
+			return s;
+		p = 1 + s;
+		s = _locator.unmarshal(data + p,len - p);
+		if (s <= 0)
+			return s;
+		p += s;
+		s = _bootstrap.unmarshal(data + p,len - p);
+		if (s <= 0)
+			return s;
+		p += s;
+
+		if ((p + 10) > len)
+			return -1;
+		_vProto = Utils::loadBigEndian<uint16_t>(data + p);
+		p += 2;
+		_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
+		p += 2;
+		_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
+		p += 2;
+		_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
+		p += 2;
+		p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
+		if (p > len)
+			return -1;
+	}
 
 
-	if ((p + 10) > len)
-		return -1;
-	_vProto = Utils::loadBigEndian<uint16_t>(data + p);
-	p += 2;
-	_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
-	p += 2;
-	_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
-	p += 2;
-	_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
-	p += 2;
-	p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
-	if (p > len)
+	if (!RR->identity.agree(_id,_key))
 		return -1;
 		return -1;
+	_incomingProbe = Protocol::createProbe(_id,RR->identity,_key);
 
 
 	return p;
 	return p;
 }
 }
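
For comparison with the port randomization in Peer::contact() above, which lists every port 1-1023 except the peer's current port and then performs 512 random pair swaps, here is the same list built and shuffled with a textbook Fisher-Yates pass (a uniform permutation in one sweep). This is a sketch: the RNG and the function name are illustrative, not ZeroTier's Utils::random().

#include <cstdint>
#include <cstdlib>

// Fills ports[] with every port in 1..1023 except skipPort, uniformly shuffled; returns the count (1022).
static unsigned int randomizedLowPorts(const uint16_t skipPort,uint16_t ports[1022])
{
	uint16_t ctr = 1;
	for (int i=0;i<1022;++i) {
		if (ctr == skipPort) ++ctr; // skip the port the peer is already using
		ports[i] = ctr++;
	}
	for (int i=1021;i>0;--i) { // Fisher-Yates shuffle
		const int j = rand() % (i + 1); // illustrative RNG only
		const uint16_t tmp = ports[i];
		ports[i] = ports[j];
		ports[j] = tmp;
	}
	return 1022;
}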

+ 104 - 54
node/Peer.hpp

@@ -23,7 +23,6 @@
 #include "Identity.hpp"
 #include "Identity.hpp"
 #include "InetAddress.hpp"
 #include "InetAddress.hpp"
 #include "SharedPtr.hpp"
 #include "SharedPtr.hpp"
-#include "AtomicCounter.hpp"
 #include "Hashtable.hpp"
 #include "Hashtable.hpp"
 #include "Mutex.hpp"
 #include "Mutex.hpp"
 #include "Endpoint.hpp"
 #include "Endpoint.hpp"
@@ -31,6 +30,7 @@
 #include "Protocol.hpp"
 #include "Protocol.hpp"
 
 
 #include <vector>
 #include <vector>
+#include <list>
 
 
 // version, identity, locator, bootstrap, version info, length of any additional fields
 // version, identity, locator, bootstrap, version info, length of any additional fields
 #define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + ZT_INETADDRESS_MARSHAL_SIZE_MAX + (2*4) + 2)
 #define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + ZT_INETADDRESS_MARSHAL_SIZE_MAX + (2*4) + 2)
@@ -66,26 +66,25 @@ public:
 	/**
 	/**
 	 * Initialize peer with an identity
 	 * Initialize peer with an identity
 	 *
 	 *
-	 * @param myIdentity This node's identity including secret key
 	 * @param peerIdentity The peer's identity
 	 * @param peerIdentity The peer's identity
 	 * @return True if initialization was successful
 	 * @return True if initialization was successful
 	 */
 	 */
-	bool init(const Identity &myIdentity,const Identity &peerIdentity);
+	bool init(const Identity &peerIdentity);
 
 
 	/**
 	/**
 	 * @return This peer's ZT address (short for identity().address())
 	 * @return This peer's ZT address (short for identity().address())
 	 */
 	 */
-	ZT_ALWAYS_INLINE const Address &address() const { return _id.address(); }
+	ZT_ALWAYS_INLINE const Address &address() const noexcept { return _id.address(); }
 
 
 	/**
 	/**
 	 * @return This peer's identity
 	 * @return This peer's identity
 	 */
 	 */
-	ZT_ALWAYS_INLINE const Identity &identity() const { return _id; }
+	ZT_ALWAYS_INLINE const Identity &identity() const noexcept { return _id; }
 
 
 	/**
 	/**
 	 * @return Copy of current locator
 	 * @return Copy of current locator
 	 */
 	 */
-	ZT_ALWAYS_INLINE Locator locator() const
+	ZT_ALWAYS_INLINE Locator locator() const noexcept
 	{
 	{
 		RWMutex::RLock l(_lock);
 		RWMutex::RLock l(_lock);
 		return _locator;
 		return _locator;
@@ -102,9 +101,6 @@ public:
 	 * @param hops ZeroTier (not IP) hops
 	 * @param hops ZeroTier (not IP) hops
 	 * @param packetId Packet ID
 	 * @param packetId Packet ID
 	 * @param verb Packet verb
 	 * @param verb Packet verb
-	 * @param inRePacketId Packet ID in reply to (default: none)
-	 * @param inReVerb Verb in reply to (for OK/ERROR, default: VERB_NOP)
-	 * @param networkId Network ID if this packet is related to a network, 0 otherwise
 	 */
 	 */
 	void received(
 	void received(
 		void *tPtr,
 		void *tPtr,
@@ -112,32 +108,29 @@ public:
 		unsigned int hops,
 		unsigned int hops,
 		uint64_t packetId,
 		uint64_t packetId,
 		unsigned int payloadLength,
 		unsigned int payloadLength,
-		Protocol::Verb verb,
-		uint64_t inRePacketId,
-		Protocol::Verb inReVerb,
-		uint64_t networkId);
+		Protocol::Verb verb);
 
 
 	/**
 	/**
-	 * Check whether a path to this peer should be tried if received via e.g. RENDEZVOUS OR PUSH_DIRECT_PATHS
+	 * Send a HELLO to this peer at a specified physical address
+	 *
+	 * No statistics or sent times are updated here.
 	 *
 	 *
+	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
+	 * @param localSocket Local source socket
+	 * @param atAddress Destination address
 	 * @param now Current time
 	 * @param now Current time
-	 * @param suggestingPeer Peer suggesting path (may be this peer)
-	 * @param addr Remote address
-	 * @return True if we have an active path to this destination
 	 */
 	 */
-	bool shouldTryPath(void *tPtr,int64_t now,const SharedPtr<Peer> &suggestedBy,const InetAddress &addr) const;
+	void sendHELLO(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
 
 
 	/**
 	/**
-	 * Send a HELLO to this peer at a specified physical address
-	 *
-	 * No statistics or sent times are updated here.
+	 * Send a NOP message to e.g. probe a new link
 	 *
 	 *
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param localSocket Local source socket
 	 * @param localSocket Local source socket
 	 * @param atAddress Destination address
 	 * @param atAddress Destination address
 	 * @param now Current time
 	 * @param now Current time
 	 */
 	 */
-	void sendHELLO(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
+	void sendNOP(void *tPtr,int64_t localSocket,const InetAddress &atAddress,int64_t now);
 
 
 	/**
 	/**
 	 * Send ping to this peer
 	 * Send ping to this peer
@@ -170,19 +163,19 @@ public:
 	 *
 	 *
 	 * @param l New latency measurement (in milliseconds)
 	 * @param l New latency measurement (in milliseconds)
 	 */
 	 */
-	void updateLatency(unsigned int l);
+	void updateLatency(unsigned int l) noexcept;
 
 
 	/**
 	/**
 	 * @return Bootstrap address or NULL if none
 	 * @return Bootstrap address or NULL if none
 	 */
 	 */
-	ZT_ALWAYS_INLINE const Endpoint &bootstrap() const { return _bootstrap; }
+	ZT_ALWAYS_INLINE const Endpoint &bootstrap() const noexcept { return _bootstrap; }
 
 
 	/**
 	/**
 	 * Set bootstrap endpoint
 	 * Set bootstrap endpoint
 	 *
 	 *
 	 * @param ep Bootstrap endpoint
 	 * @param ep Bootstrap endpoint
 	 */
 	 */
-	ZT_ALWAYS_INLINE void setBootstrap(const Endpoint &ep)
+	ZT_ALWAYS_INLINE void setBootstrap(const Endpoint &ep) noexcept
 	{
 	{
 		_lock.lock();
 		_lock.lock();
 		_bootstrap = ep;
 		_bootstrap = ep;
@@ -192,27 +185,32 @@ public:
 	/**
 	/**
 	 * @return Time of last receive of anything, whether direct or relayed
 	 * @return Time of last receive of anything, whether direct or relayed
 	 */
 	 */
-	ZT_ALWAYS_INLINE int64_t lastReceive() const { return _lastReceive; }
+	ZT_ALWAYS_INLINE int64_t lastReceive() const noexcept { return _lastReceive; }
 
 
 	/**
 	/**
 	 * @return True if we've heard from this peer in less than ZT_PEER_ALIVE_TIMEOUT
 	 * @return True if we've heard from this peer in less than ZT_PEER_ALIVE_TIMEOUT
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool alive(const int64_t now) const { return ((now - _lastReceive) < ZT_PEER_ALIVE_TIMEOUT); }
+	ZT_ALWAYS_INLINE bool alive(const int64_t now) const noexcept { return ((now - _lastReceive) < ZT_PEER_ALIVE_TIMEOUT); }
 
 
 	/**
 	/**
 	 * @return True if we've heard from this peer in less than ZT_PEER_ACTIVITY_TIMEOUT
 	 * @return True if we've heard from this peer in less than ZT_PEER_ACTIVITY_TIMEOUT
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool active(const int64_t now) const { return ((now - _lastReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }
+	ZT_ALWAYS_INLINE bool active(const int64_t now) const noexcept { return ((now - _lastReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }
 
 
 	/**
 	/**
 	 * @return Latency in milliseconds of best/aggregate path or 0xffff if unknown
 	 * @return Latency in milliseconds of best/aggregate path or 0xffff if unknown
 	 */
 	 */
-	ZT_ALWAYS_INLINE unsigned int latency() const { return _latency; }
+	ZT_ALWAYS_INLINE unsigned int latency() const noexcept { return _latency; }
 
 
 	/**
 	/**
 	 * @return 256-bit secret symmetric encryption key
 	 * @return 256-bit secret symmetric encryption key
 	 */
 	 */
-	ZT_ALWAYS_INLINE const unsigned char *key() const { return _key; }
+	ZT_ALWAYS_INLINE const unsigned char *key() const noexcept { return _key; }
+
+	/**
+	 * @return Incoming probe packet (in big-endian byte order)
+	 */
+	ZT_ALWAYS_INLINE uint64_t incomingProbe() const noexcept { return _incomingProbe; }
 
 
 	/**
 	/**
 	 * Set the currently known remote version of this peer's client
 	 * Set the currently known remote version of this peer's client
@@ -222,7 +220,7 @@ public:
 	 * @param vmin Minor version
 	 * @param vmin Minor version
 	 * @param vrev Revision
 	 * @param vrev Revision
 	 */
 	 */
-	ZT_ALWAYS_INLINE void setRemoteVersion(unsigned int vproto,unsigned int vmaj,unsigned int vmin,unsigned int vrev)
+	ZT_ALWAYS_INLINE void setRemoteVersion(unsigned int vproto,unsigned int vmaj,unsigned int vmin,unsigned int vrev) noexcept
 	{
 	{
 		_vProto = (uint16_t)vproto;
 		_vProto = (uint16_t)vproto;
 		_vMajor = (uint16_t)vmaj;
 		_vMajor = (uint16_t)vmaj;
@@ -230,16 +228,16 @@ public:
 		_vRevision = (uint16_t)vrev;
 		_vRevision = (uint16_t)vrev;
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE unsigned int remoteVersionProtocol() const { return _vProto; }
-	ZT_ALWAYS_INLINE unsigned int remoteVersionMajor() const { return _vMajor; }
-	ZT_ALWAYS_INLINE unsigned int remoteVersionMinor() const { return _vMinor; }
-	ZT_ALWAYS_INLINE unsigned int remoteVersionRevision() const { return _vRevision; }
-	ZT_ALWAYS_INLINE bool remoteVersionKnown() const { return ((_vMajor > 0)||(_vMinor > 0)||(_vRevision > 0)); }
+	ZT_ALWAYS_INLINE unsigned int remoteVersionProtocol() const noexcept { return _vProto; }
+	ZT_ALWAYS_INLINE unsigned int remoteVersionMajor() const noexcept { return _vMajor; }
+	ZT_ALWAYS_INLINE unsigned int remoteVersionMinor() const noexcept { return _vMinor; }
+	ZT_ALWAYS_INLINE unsigned int remoteVersionRevision() const noexcept { return _vRevision; }
+	ZT_ALWAYS_INLINE bool remoteVersionKnown() const noexcept { return ((_vMajor > 0)||(_vMinor > 0)||(_vRevision > 0)); }
 
 
 	/**
 	/**
 	 * Rate limit gate for inbound WHOIS requests
 	 * Rate limit gate for inbound WHOIS requests
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool rateGateInboundWhoisRequest(const int64_t now)
+	ZT_ALWAYS_INLINE bool rateGateInboundWhoisRequest(const int64_t now) noexcept
 	{
 	{
 		if ((now - _lastWhoisRequestReceived) >= ZT_PEER_WHOIS_RATE_LIMIT) {
 		if ((now - _lastWhoisRequestReceived) >= ZT_PEER_WHOIS_RATE_LIMIT) {
 			_lastWhoisRequestReceived = now;
 			_lastWhoisRequestReceived = now;
@@ -251,7 +249,7 @@ public:
 	/**
 	/**
 	 * Rate limit gate for inbound PUSH_DIRECT_PATHS requests
 	 * Rate limit gate for inbound PUSH_DIRECT_PATHS requests
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool rateGateInboundPushDirectPaths(const int64_t now)
+	ZT_ALWAYS_INLINE bool rateGateInboundPushDirectPaths(const int64_t now) noexcept
 	{
 	{
 		if ((now - _lastPushDirectPathsReceived) >= ZT_DIRECT_PATH_PUSH_INTERVAL) {
 		if ((now - _lastPushDirectPathsReceived) >= ZT_DIRECT_PATH_PUSH_INTERVAL) {
 			_lastPushDirectPathsReceived = now;
 			_lastPushDirectPathsReceived = now;
@@ -260,10 +258,22 @@ public:
 		return false;
 		return false;
 	}
 	}
 
 
+	/**
+	 * Rate limit attempts in response to incoming short probe packets
+	 */
+	ZT_ALWAYS_INLINE bool rateGateInboundProbe(const int64_t now) noexcept
+	{
+		if ((now - _lastProbeReceived) >= ZT_DIRECT_PATH_PUSH_INTERVAL) {
+			_lastProbeReceived = now;
+			return true;
+		}
+		return false;
+	}
+
 	/**
 	/**
 	 * Rate limit gate for inbound ECHO requests
 	 * Rate limit gate for inbound ECHO requests
 	 */
 	 */
-	ZT_ALWAYS_INLINE bool rateGateEchoRequest(const int64_t now)
+	ZT_ALWAYS_INLINE bool rateGateEchoRequest(const int64_t now) noexcept
 	{
 	{
 		if ((now - _lastEchoRequestReceived) >= ZT_PEER_GENERAL_RATE_LIMIT) {
 		if ((now - _lastEchoRequestReceived) >= ZT_PEER_GENERAL_RATE_LIMIT) {
 			_lastEchoRequestReceived = now;
 			_lastEchoRequestReceived = now;
@@ -273,20 +283,14 @@ public:
 	}
 	}
 
 
 	/**
 	/**
-	 * Send directly if a direct path exists
-	 *
-	 * @param tPtr Thread pointer supplied by user
-	 * @param data Data to send
-	 * @param len Length of data
-	 * @param now Current time
-	 * @return True if packet appears to have been sent, false if no path or send failed
+	 * @return Current best path
 	 */
 	 */
-	bool sendDirect(void *tPtr,const void *data,unsigned int len,int64_t now);
+	SharedPtr<Path> path(int64_t now);
 
 
 	/**
 	/**
-	 * @return Current best path
+	 * @return True if there is at least one alive direct path
 	 */
 	 */
-	SharedPtr<Path> path(int64_t now);
+	bool direct(int64_t now);
 
 
 	/**
 	/**
 	 * Get all paths
 	 * Get all paths
@@ -300,11 +304,32 @@ public:
 	 */
 	 */
 	void save(void *tPtr) const;
 	void save(void *tPtr) const;
 
 
+	/**
+	 * Attempt to contact this peer at a physical address
+	 *
+	 * This checks rate limits, path usability, sometimes deploys advanced NAT-t techniques, etc.
+	 *
+	 * @param tPtr External user pointer we pass around
+	 * @param ep Endpoint to attempt to contact
+	 * @param now Current time
+	 * @param behindSymmetric This peer may be behind a symmetric NAT (only meaningful for IPv4)
+	 * @param bfg1024 Use BFG1024 brute force symmetric NAT busting algorithm if applicable
+	 */
+	void contact(void *tPtr,const Endpoint &ep,int64_t now,bool behindSymmetric,bool bfg1024);
+
+	/**
+	 * Called by Node when an alarm set by this peer goes off
+	 *
+	 * @param tPtr External user pointer we pass around
+	 * @param now Current time
+	 */
+	void alarm(void *tPtr,int64_t now);
+
 	// NOTE: peer marshal/unmarshal only saves/restores the identity, locator, most
 	// NOTE: peer marshal/unmarshal only saves/restores the identity, locator, most
 	// recent bootstrap address, and version information.
 	// recent bootstrap address, and version information.
-	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_PEER_MARSHAL_SIZE_MAX; }
-	int marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const;
-	int unmarshal(const uint8_t *restrict data,int len);
+	static constexpr int marshalSizeMax() noexcept { return ZT_PEER_MARSHAL_SIZE_MAX; }
+	int marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept;
+	int unmarshal(const uint8_t *restrict data,int len) noexcept;
 
 
 private:
 private:
 	void _prioritizePaths(int64_t now);
 	void _prioritizePaths(int64_t now);
@@ -317,19 +342,44 @@ private:
 	volatile int64_t _lastWhoisRequestReceived;
 	volatile int64_t _lastWhoisRequestReceived;
 	volatile int64_t _lastEchoRequestReceived;
 	volatile int64_t _lastEchoRequestReceived;
 	volatile int64_t _lastPushDirectPathsReceived;
 	volatile int64_t _lastPushDirectPathsReceived;
+	volatile int64_t _lastProbeReceived;
 	volatile int64_t _lastAttemptedP2PInit;
 	volatile int64_t _lastAttemptedP2PInit;
 	volatile int64_t _lastTriedStaticPath;
 	volatile int64_t _lastTriedStaticPath;
 	volatile int64_t _lastPrioritizedPaths;
 	volatile int64_t _lastPrioritizedPaths;
+	volatile int64_t _lastAttemptedAggressiveNATTraversal;
 	volatile unsigned int _latency;
 	volatile unsigned int _latency;
 
 
-	AtomicCounter<int> __refCount;
+	std::atomic<int> __refCount;
 
 
-	RWMutex _lock; // locks _alivePathCount, _paths, _locator, and _bootstrap.
+	// Lock for non-volatile read/write fields
+	RWMutex _lock;
 
 
+	// Number of paths currently alive as of the last _prioritizePaths
 	unsigned int _alivePathCount;
 	unsigned int _alivePathCount;
+
+	// Direct paths sorted in descending order of preference (can be NULL, if first is NULL there's no direct path)
 	SharedPtr<Path> _paths[ZT_MAX_PEER_NETWORK_PATHS];
 	SharedPtr<Path> _paths[ZT_MAX_PEER_NETWORK_PATHS];
 
 
+	// Queue of batches of one or more physical addresses to try at some point in the future (for NAT traversal logic)
+	struct _ContactQueueItem
+	{
+		ZT_ALWAYS_INLINE _ContactQueueItem() {}
+		ZT_ALWAYS_INLINE _ContactQueueItem(const InetAddress &a,const uint16_t *pstart,const uint16_t *pend,const unsigned int apt) :
+			address(a),
+			ports(pstart,pend),
+			alivePathThreshold(apt) {}
+		ZT_ALWAYS_INLINE _ContactQueueItem(const InetAddress &a,const unsigned int apt) :
+			address(a),
+			ports(),
+			alivePathThreshold(apt) {}
+		InetAddress address;
+		std::vector<uint16_t> ports; // if non-empty try these ports, otherwise use the one in address
+		unsigned int alivePathThreshold; // skip and forget if alive path count is >= this
+	};
+	std::list<_ContactQueueItem> _contactQueue;
+
 	Identity _id;
 	Identity _id;
+	uint64_t _incomingProbe;
 	Locator _locator;
 	Locator _locator;
 	Endpoint _bootstrap; // right now only InetAddress endpoints are supported for bootstrap
 	Endpoint _bootstrap; // right now only InetAddress endpoints are supported for bootstrap
 
 

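A sketch of the rate-gate idiom used by rateGateInboundWhoisRequest(), rateGateInboundProbe(), and the other gates above: one timestamp per event type, and an event is admitted only if at least the configured interval has elapsed since the last admitted one. The struct and method names are illustrative.

#include <cstdint>

struct RateGate
{
	int64_t last;     // time of the last admitted event
	int64_t interval; // minimum spacing between admitted events (milliseconds)

	bool admit(const int64_t now) noexcept
	{
		if ((now - last) >= interval) {
			last = now; // remember this occurrence
			return true;
		}
		return false;   // too soon; the caller should drop or ignore the event
	}
};
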
+ 3 - 2
node/Poly1305.cpp

@@ -467,7 +467,8 @@ poly1305_finish(poly1305_context *ctx, unsigned char mac[16]) {
 
 
 #endif // MSC/GCC or not
 #endif // MSC/GCC or not
 
 
-static inline void poly1305_update(poly1305_context *ctx, const unsigned char *m, size_t bytes) {
+static ZT_ALWAYS_INLINE void poly1305_update(poly1305_context *ctx, const unsigned char *m, size_t bytes) noexcept
+{
   poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
   poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
   size_t i;
   size_t i;
 
 
@@ -505,7 +506,7 @@ static inline void poly1305_update(poly1305_context *ctx, const unsigned char *m
 
 
 } // anonymous namespace
 } // anonymous namespace
 
 
-void poly1305(void *auth,const void *data,unsigned int len,const void *key)
+void poly1305(void *auth,const void *data,unsigned int len,const void *key) noexcept
 {
 {
   poly1305_context ctx;
   poly1305_context ctx;
   poly1305_init(&ctx,reinterpret_cast<const unsigned char *>(key));
   poly1305_init(&ctx,reinterpret_cast<const unsigned char *>(key));

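A minimal usage sketch of the one-shot poly1305() wrapper shown above: it writes a 16-byte authenticator for a message under a 32-byte one-time key, and the key must never be reused. The include path is assumed.

#include <cstdint>
#include "Poly1305.hpp" // assumed include path

void authenticateExample(const uint8_t oneTimeKey[32],const uint8_t *msg,unsigned int len,uint8_t mac[16])
{
	ZeroTier::poly1305(mac,msg,len,oneTimeKey); // fills mac[0..15] with the authenticator
}
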
+ 1 - 1
node/Poly1305.hpp

@@ -27,7 +27,7 @@ namespace ZeroTier {
  * @param len Length of data to authenticate in bytes
  * @param len Length of data to authenticate in bytes
  * @param key 32-byte one-time use key to authenticate data (must not be reused)
  * @param key 32-byte one-time use key to authenticate data (must not be reused)
  */
  */
-void poly1305(void *auth,const void *data,unsigned int len,const void *key);
+void poly1305(void *auth,const void *data,unsigned int len,const void *key) noexcept;
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier
 
 

+ 13 - 3
node/Protocol.cpp

@@ -62,7 +62,17 @@ uintptr_t _checkSizes()
 // Make compiler compile and "run" _checkSizes()
 // Make compiler compile and "run" _checkSizes()
 volatile uintptr_t _checkSizesIMeanIt = _checkSizes();
 volatile uintptr_t _checkSizesIMeanIt = _checkSizes();
 
 
-uint64_t getPacketId()
+uint64_t createProbe(const Identity &sender,const Identity &recipient,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH]) noexcept
+{
+	uint8_t tmp[ZT_IDENTITY_HASH_SIZE + ZT_IDENTITY_HASH_SIZE];
+	memcpy(tmp,sender.hash(),ZT_IDENTITY_HASH_SIZE);
+	memcpy(tmp + ZT_IDENTITY_HASH_SIZE,recipient.hash(),ZT_IDENTITY_HASH_SIZE);
+	uint64_t hash[6];
+	SHA384(hash,tmp,sizeof(tmp),key,ZT_PEER_SECRET_KEY_LENGTH);
+	return hash[0];
+}
+
+uint64_t getPacketId() noexcept
 {
 {
 #ifdef ZT_PACKET_USE_ATOMIC_INTRINSICS
 #ifdef ZT_PACKET_USE_ATOMIC_INTRINSICS
 	return __sync_add_and_fetch(&_packetIdCtr,1ULL);
 	return __sync_add_and_fetch(&_packetIdCtr,1ULL);
@@ -71,7 +81,7 @@ uint64_t getPacketId()
 #endif
 #endif
 }
 }
 
 
-void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite)
+void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite) noexcept
 {
 {
 	Protocol::Header &ph = pkt.as<Protocol::Header>();
 	Protocol::Header &ph = pkt.as<Protocol::Header>();
 	ph.flags = (ph.flags & 0xc7U) | ((cipherSuite << 3U) & 0x38U); // flags: FFCCCHHH where CCC is cipher
 	ph.flags = (ph.flags & 0xc7U) | ((cipherSuite << 3U) & 0x38U); // flags: FFCCCHHH where CCC is cipher
@@ -113,7 +123,7 @@ void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY
 	}
 	}
 }
 }
 
 
-unsigned int compress(SharedPtr<Buf> &pkt,unsigned int packetSize)
+unsigned int compress(SharedPtr<Buf> &pkt,unsigned int packetSize) noexcept
 {
 {
 	if (packetSize <= 128)
 	if (packetSize <= 128)
 		return packetSize;
 		return packetSize;
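
Note: getPacketId() still goes through the pre-C++11 intrinsic or mutex path; on a C++11 compiler the same contract, a sequential non-repeating 64-bit packet ID / nonce, can be expressed with std::atomic, as in this standalone sketch (the names below are illustrative, not from the commit):

#include <atomic>
#include <cstdint>
#include <iostream>

// Illustrative stand-in for getPacketId(): a process-wide atomic counter that
// hands out distinct, increasing 64-bit packet IDs / nonces without locking.
static std::atomic<uint64_t> s_packetIdCtr(0);

static uint64_t nextPacketId() noexcept
{
	return ++s_packetIdCtr; // returns the incremented value, like __sync_add_and_fetch
}

int main()
{
	std::cout << nextPacketId() << " " << nextPacketId() << std::endl;
	return 0;
}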

+ 58 - 6
node/Protocol.hpp

@@ -20,6 +20,8 @@
 #include "Poly1305.hpp"
 #include "Poly1305.hpp"
 #include "LZ4.hpp"
 #include "LZ4.hpp"
 #include "Buf.hpp"
 #include "Buf.hpp"
+#include "Address.hpp"
+#include "Identity.hpp"
 
 
 /**
 /**
  * Protocol version -- incremented only for major changes
  * Protocol version -- incremented only for major changes
@@ -55,6 +57,7 @@
  *    + AES encryption support
  *    + AES encryption support
  *    + NIST P-384 (type 1) identities
  *    + NIST P-384 (type 1) identities
  *    + Ephemeral keys
  *    + Ephemeral keys
+ *    + Short probe packets to reduce probe bandwidth
  */
  */
 #define ZT_PROTO_VERSION 11
 #define ZT_PROTO_VERSION 11
 
 
@@ -127,11 +130,21 @@
  */
  */
 #define ZT_PROTO_PACKET_FRAGMENT_INDICATOR_INDEX 13
 #define ZT_PROTO_PACKET_FRAGMENT_INDICATOR_INDEX 13
 
 
+/**
+ * Index of flags field in regular packet headers
+ */
+#define ZT_PROTO_PACKET_FLAGS_INDEX 18
+
 /**
 /**
  * Minimum viable length for a fragment
  * Minimum viable length for a fragment
  */
  */
 #define ZT_PROTO_MIN_FRAGMENT_LENGTH 16
 #define ZT_PROTO_MIN_FRAGMENT_LENGTH 16
 
 
+/**
+ * Length of a probe
+ */
+#define ZT_PROTO_PROBE_LENGTH 8
+
 /**
 /**
  * Index at which packet fragment payload starts
  * Index at which packet fragment payload starts
  */
  */
@@ -809,6 +822,12 @@ ZT_PACKED_STRUCT(struct EXT_FRAME
 	uint8_t flags;
 	uint8_t flags;
 });
 });
 
 
+ZT_PACKED_STRUCT(struct PUSH_DIRECT_PATHS
+{
+	Header h;
+	uint16_t numPaths;
+});
+
 ZT_PACKED_STRUCT(struct MULTICAST_LIKE
 ZT_PACKED_STRUCT(struct MULTICAST_LIKE
 {
 {
 	ZT_PACKED_STRUCT(struct Entry
 	ZT_PACKED_STRUCT(struct Entry
@@ -908,14 +927,37 @@ ZT_PACKED_STRUCT(struct UNSUPPORTED_OPERATION__NETWORK_CONFIG_REQUEST
 /****************************************************************************/
 /****************************************************************************/
 
 
 /**
 /**
+ * Convenience function to pull packet ID from a raw buffer
+ *
+ * @param pkt Packet to read first 8 bytes from
+ * @param packetSize Packet's actual size in bytes
+ * @return Packet ID or 0 if packet size is less than 8
+ */
+ZT_ALWAYS_INLINE uint64_t packetId(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize >= 8) ? Utils::loadBigEndian<uint64_t>(pkt.b) : 0ULL; }
+
+/**
+ * @param pkt Packet to extract hops from
+ * @param packetSize Packet's actual size in bytes
  * @return 3-bit hops field embedded in packet flags field
  * @return 3-bit hops field embedded in packet flags field
  */
  */
-ZT_ALWAYS_INLINE uint8_t packetHops(const Header &h) { return (h.flags & 0x07U); }
+ZT_ALWAYS_INLINE uint8_t packetHops(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize > ZT_PROTO_PACKET_FLAGS_INDEX) ? (pkt.b[ZT_PROTO_PACKET_FLAGS_INDEX] & 0x07U) : 0; }
 
 
 /**
 /**
+ * @param pkt Packet to extract cipher ID from
+ * @param packetSize Packet's actual size in bytes
  * @return 3-bit cipher field embedded in packet flags field
  * @return 3-bit cipher field embedded in packet flags field
  */
  */
-ZT_ALWAYS_INLINE uint8_t packetCipher(const Header &h) { return ((h.flags >> 3U) & 0x07U); }
+ZT_ALWAYS_INLINE uint8_t packetCipher(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize > ZT_PROTO_PACKET_FLAGS_INDEX) ? ((pkt.b[ZT_PROTO_PACKET_FLAGS_INDEX] >> 3U) & 0x07U) : 0; }
+
+/**
+ * @return 3-bit hops field embedded in packet flags field
+ */
+ZT_ALWAYS_INLINE uint8_t packetHops(const Header &ph) noexcept { return (ph.flags & 0x07U); }
+
+/**
+ * @return 3-bit cipher field embedded in packet flags field
+ */
+ZT_ALWAYS_INLINE uint8_t packetCipher(const Header &ph) noexcept { return ((ph.flags >> 3U) & 0x07U); }
 
 
 /**
 /**
  * Deterministically mangle a 256-bit crypto key based on packet characteristics
  * Deterministically mangle a 256-bit crypto key based on packet characteristics
@@ -927,7 +969,7 @@ ZT_ALWAYS_INLINE uint8_t packetCipher(const Header &h) { return ((h.flags >> 3U)
  * @param in Input key (32 bytes)
  * @param in Input key (32 bytes)
  * @param out Output buffer (32 bytes)
  * @param out Output buffer (32 bytes)
  */
  */
-ZT_ALWAYS_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const out,const Buf &packet,const unsigned int packetSize)
+ZT_ALWAYS_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const out,const Buf &packet,const unsigned int packetSize) noexcept
 {
 {
 	// IV and source/destination addresses. Using the addresses divides the
 	// IV and source/destination addresses. Using the addresses divides the
 	// key space into two halves-- A->B and B->A (since order will change).
 	// key space into two halves-- A->B and B->A (since order will change).
@@ -960,12 +1002,22 @@ ZT_ALWAYS_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const
 #endif
 #endif
 }
 }
 
 
+/**
+ * Create a short probe packet for probing a recipient for e.g. NAT traversal and path setup
+ *
+ * @param sender Sender identity
+ * @param recipient Recipient identity
+ * @param key Long-term shared secret key resulting from sender and recipient agreement
+ * @return Probe packed into 64-bit integer (in big-endian byte order)
+ */
+uint64_t createProbe(const Identity &sender,const Identity &recipient,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH]) noexcept;
+
 /**
 /**
  * Get a sequential non-repeating packet ID for the next packet (thread-safe)
  * Get a sequential non-repeating packet ID for the next packet (thread-safe)
  *
  *
  * @return Next packet ID / cryptographic nonce
  * @return Next packet ID / cryptographic nonce
  */
  */
-uint64_t getPacketId();
+uint64_t getPacketId() noexcept;
 
 
 /**
 /**
  * Encrypt and compute packet MAC
  * Encrypt and compute packet MAC
@@ -975,7 +1027,7 @@ uint64_t getPacketId();
  * @param key Key to use for encryption (not per-packet key)
  * @param key Key to use for encryption (not per-packet key)
  * @param cipherSuite Cipher suite to use for AEAD encryption or just MAC
  * @param cipherSuite Cipher suite to use for AEAD encryption or just MAC
  */
  */
-void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite);
+void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite) noexcept;
 
 
 /**
 /**
  * Attempt to compress packet payload
  * Attempt to compress packet payload
@@ -989,7 +1041,7 @@ void armor(Buf &pkt,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY
  * @param packetSize Total size of packet in bytes (including headers)
  * @param packetSize Total size of packet in bytes (including headers)
 * @return New size of packet after compression or original size if compression wasn't helpful
 * @return New size of packet after compression or original size if compression wasn't helpful
  */
  */
-unsigned int compress(SharedPtr<Buf> &pkt,unsigned int packetSize);
+unsigned int compress(SharedPtr<Buf> &pkt,unsigned int packetSize) noexcept;
 
 
 } // namespace Protocol
 } // namespace Protocol
 } // namespace ZeroTier
 } // namespace ZeroTier
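
Note: the packet flags byte read by packetHops() and packetCipher() is laid out FFCCCHHH: two flag bits, a 3-bit cipher suite, and a 3-bit hop count. A standalone sketch of that bit layout (helper names are illustrative):

#include <cassert>
#include <cstdint>

// FFCCCHHH: F = flag bits, CCC = cipher suite, HHH = hop count, matching the
// masks used by armor(), packetHops(), and packetCipher() above.
static uint8_t setCipher(uint8_t flags,uint8_t cipherSuite) { return (uint8_t)((flags & 0xc7U) | ((cipherSuite << 3U) & 0x38U)); }
static uint8_t hops(uint8_t flags)   { return flags & 0x07U; }
static uint8_t cipher(uint8_t flags) { return (flags >> 3U) & 0x07U; }

int main()
{
	uint8_t flags = 0;
	flags = setCipher(flags,3);               // select cipher suite 3
	flags = (uint8_t)((flags & 0xf8U) | 2U);  // set hop count to 2
	assert(cipher(flags) == 3 && hops(flags) == 2);
	return 0;
}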

+ 3 - 3
node/Revocation.cpp

@@ -15,7 +15,7 @@
 
 
 namespace ZeroTier {
 namespace ZeroTier {
 
 
-bool Revocation::sign(const Identity &signer)
+bool Revocation::sign(const Identity &signer) noexcept
 {
 {
 	uint8_t buf[ZT_REVOCATION_MARSHAL_SIZE_MAX+32];
 	uint8_t buf[ZT_REVOCATION_MARSHAL_SIZE_MAX+32];
 	if (signer.hasPrivate()) {
 	if (signer.hasPrivate()) {
@@ -26,7 +26,7 @@ bool Revocation::sign(const Identity &signer)
 	return false;
 	return false;
 }
 }
 
 
-int Revocation::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign) const
+int Revocation::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign) const noexcept
 {
 {
 	int p = 0;
 	int p = 0;
 	if (forSign) {
 	if (forSign) {
@@ -58,7 +58,7 @@ int Revocation::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSig
 	return p;
 	return p;
 }
 }
 
 
-int Revocation::unmarshal(const uint8_t *restrict data,const int len)
+int Revocation::unmarshal(const uint8_t *restrict data,const int len) noexcept
 {
 {
 	if (len < 54)
 	if (len < 54)
 		return -1;
 		return -1;

+ 18 - 18
node/Revocation.hpp

@@ -45,9 +45,9 @@ class Revocation : public Credential
 	friend class Credential;
 	friend class Credential;
 
 
 public:
 public:
-	static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_REVOCATION; }
+	static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_REVOCATION; }
 
 
-	ZT_ALWAYS_INLINE Revocation() { memoryZero(this); }
+	ZT_ALWAYS_INLINE Revocation() noexcept { memoryZero(this); }
 
 
 	/**
 	/**
 	 * @param i ID (arbitrary for revocations, currently random)
 	 * @param i ID (arbitrary for revocations, currently random)
@@ -58,7 +58,7 @@ public:
 	 * @param tgt Target node whose credential(s) are being revoked
 	 * @param tgt Target node whose credential(s) are being revoked
 	 * @param ct Credential type being revoked
 	 * @param ct Credential type being revoked
 	 */
 	 */
-	ZT_ALWAYS_INLINE Revocation(const uint32_t i,const uint64_t nwid,const uint32_t cid,const uint64_t thr,const uint64_t fl,const Address &tgt,const ZT_CredentialType ct) :
+	ZT_ALWAYS_INLINE Revocation(const uint32_t i,const uint64_t nwid,const uint32_t cid,const uint64_t thr,const uint64_t fl,const Address &tgt,const ZT_CredentialType ct) noexcept :
 		_id(i),
 		_id(i),
 		_credentialId(cid),
 		_credentialId(cid),
 		_networkId(nwid),
 		_networkId(nwid),
@@ -71,22 +71,22 @@ public:
 	{
 	{
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE uint32_t id() const { return _id; }
-	ZT_ALWAYS_INLINE uint32_t credentialId() const { return _credentialId; }
-	ZT_ALWAYS_INLINE uint64_t networkId() const { return _networkId; }
-	ZT_ALWAYS_INLINE int64_t threshold() const { return _threshold; }
-	ZT_ALWAYS_INLINE const Address &target() const { return _target; }
-	ZT_ALWAYS_INLINE const Address &signer() const { return _signedBy; }
-	ZT_ALWAYS_INLINE ZT_CredentialType typeBeingRevoked() const { return _type; }
-	ZT_ALWAYS_INLINE const uint8_t *signature() const { return _signature; }
-	ZT_ALWAYS_INLINE unsigned int signatureLength() const { return _signatureLength; }
-	ZT_ALWAYS_INLINE bool fastPropagate() const { return ((_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0); }
+	ZT_ALWAYS_INLINE uint32_t id() const noexcept { return _id; }
+	ZT_ALWAYS_INLINE uint32_t credentialId() const noexcept { return _credentialId; }
+	ZT_ALWAYS_INLINE uint64_t networkId() const noexcept { return _networkId; }
+	ZT_ALWAYS_INLINE int64_t threshold() const noexcept { return _threshold; }
+	ZT_ALWAYS_INLINE const Address &target() const noexcept { return _target; }
+	ZT_ALWAYS_INLINE const Address &signer() const noexcept { return _signedBy; }
+	ZT_ALWAYS_INLINE ZT_CredentialType typeBeingRevoked() const noexcept { return _type; }
+	ZT_ALWAYS_INLINE const uint8_t *signature() const noexcept { return _signature; }
+	ZT_ALWAYS_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
+	ZT_ALWAYS_INLINE bool fastPropagate() const noexcept { return ((_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0); }
 
 
 	/**
 	/**
 	 * @param signer Signing identity, must have private key
 	 * @param signer Signing identity, must have private key
 	 * @return True if signature was successful
 	 * @return True if signature was successful
 	 */
 	 */
-	bool sign(const Identity &signer);
+	bool sign(const Identity &signer) noexcept;
 
 
 	/**
 	/**
 	 * Verify this revocation's signature
 	 * Verify this revocation's signature
@@ -94,11 +94,11 @@ public:
 	 * @param RR Runtime environment to provide for peer lookup, etc.
 	 * @param RR Runtime environment to provide for peer lookup, etc.
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 */
 	 */
-	ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const { return _verify(RR,tPtr,*this); }
+	ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const noexcept { return _verify(RR,tPtr,*this); }
 
 
-	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_REVOCATION_MARSHAL_SIZE_MAX; }
-	int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign = false) const;
-	int unmarshal(const uint8_t *restrict data,int len);
+	static constexpr int marshalSizeMax() noexcept { return ZT_REVOCATION_MARSHAL_SIZE_MAX; }
+	int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign = false) const noexcept;
+	int unmarshal(const uint8_t *restrict data,int len) noexcept;
 
 
 private:
 private:
 	uint32_t _id;
 	uint32_t _id;
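
Note: Revocation shares the marshal/unmarshal convention used by the credential classes in this commit: a caller-supplied buffer sized by marshalSizeMax() and an int return giving bytes written or consumed, negative on error. A toy round trip under those assumptions (the Blob type is illustrative and serializes in host byte order for brevity; real credentials write big-endian fields):

#include <cassert>
#include <cstdint>
#include <cstring>

// Toy type following the marshal()/unmarshal() convention of Revocation, Tag,
// etc.: fixed maximum wire size, negative return on error.
struct Blob
{
	static constexpr int marshalSizeMax() noexcept { return 8; }
	uint64_t id;
	int marshal(uint8_t data[8]) const noexcept { std::memcpy(data,&id,8); return 8; }
	int unmarshal(const uint8_t *data,int len) noexcept
	{
		if (len < 8) return -1; // too short: error
		std::memcpy(&id,data,8);
		return 8;               // bytes consumed
	}
};

int main()
{
	uint8_t buf[Blob::marshalSizeMax()];
	Blob a{0x1122334455667788ULL},b{};
	assert(a.marshal(buf) == 8 && b.unmarshal(buf,8) == 8 && b.id == a.id);
	return 0;
}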

+ 0 - 2
node/RuntimeEnvironment.hpp

@@ -14,8 +14,6 @@
 #ifndef ZT_RUNTIMEENVIRONMENT_HPP
 #ifndef ZT_RUNTIMEENVIRONMENT_HPP
 #define ZT_RUNTIMEENVIRONMENT_HPP
 #define ZT_RUNTIMEENVIRONMENT_HPP
 
 
-#include <string.h>
-
 #include "Constants.hpp"
 #include "Constants.hpp"
 #include "Utils.hpp"
 #include "Utils.hpp"
 #include "Identity.hpp"
 #include "Identity.hpp"

+ 14 - 14
node/ScopedPtr.hpp

@@ -28,30 +28,30 @@ template<typename T>
 class ScopedPtr : public TriviallyCopyable
 class ScopedPtr : public TriviallyCopyable
 {
 {
 public:
 public:
-	explicit ZT_ALWAYS_INLINE ScopedPtr(T *const p) : _p(p) {}
+	explicit ZT_ALWAYS_INLINE ScopedPtr(T *const p) noexcept : _p(p) {}
 	ZT_ALWAYS_INLINE ~ScopedPtr() { delete _p; }
 	ZT_ALWAYS_INLINE ~ScopedPtr() { delete _p; }
 
 
-	ZT_ALWAYS_INLINE T *operator->() const { return _p; }
-	ZT_ALWAYS_INLINE T &operator*() const { return *_p; }
-	explicit ZT_ALWAYS_INLINE operator bool() const { return (_p != (T *)0); }
-	ZT_ALWAYS_INLINE T *ptr() const { return _p; }
+	ZT_ALWAYS_INLINE T *operator->() const noexcept { return _p; }
+	ZT_ALWAYS_INLINE T &operator*() const noexcept { return *_p; }
+	explicit ZT_ALWAYS_INLINE operator bool() const noexcept { return (_p != (T *)0); }
+	ZT_ALWAYS_INLINE T *ptr() const noexcept { return _p; }
 
 
-	ZT_ALWAYS_INLINE void swap(const ScopedPtr &p)
+	ZT_ALWAYS_INLINE void swap(const ScopedPtr &p) noexcept
 	{
 	{
 		T *const tmp = _p;
 		T *const tmp = _p;
 		_p = p._p;
 		_p = p._p;
 		p._p = tmp;
 		p._p = tmp;
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE bool operator==(const ScopedPtr &p) const { return (_p == p._p); }
-	ZT_ALWAYS_INLINE bool operator!=(const ScopedPtr &p) const { return (_p != p._p); }
-	ZT_ALWAYS_INLINE bool operator==(T *const p) const { return (_p == p); }
-	ZT_ALWAYS_INLINE bool operator!=(T *const p) const { return (_p != p); }
+	ZT_ALWAYS_INLINE bool operator==(const ScopedPtr &p) const noexcept { return (_p == p._p); }
+	ZT_ALWAYS_INLINE bool operator!=(const ScopedPtr &p) const noexcept { return (_p != p._p); }
+	ZT_ALWAYS_INLINE bool operator==(T *const p) const noexcept { return (_p == p); }
+	ZT_ALWAYS_INLINE bool operator!=(T *const p) const noexcept { return (_p != p); }
 
 
 private:
 private:
-	ZT_ALWAYS_INLINE ScopedPtr() {}
-	ZT_ALWAYS_INLINE ScopedPtr(const ScopedPtr &p) : _p(nullptr) {}
-	ZT_ALWAYS_INLINE ScopedPtr &operator=(const ScopedPtr &p) { return *this; }
+	ZT_ALWAYS_INLINE ScopedPtr() noexcept {}
+	ZT_ALWAYS_INLINE ScopedPtr(const ScopedPtr &p) noexcept : _p(nullptr) {}
+	ZT_ALWAYS_INLINE ScopedPtr &operator=(const ScopedPtr &p) noexcept { return *this; }
 
 
 	T *const _p;
 	T *const _p;
 };
 };
@@ -60,7 +60,7 @@ private:
 
 
 namespace std {
 namespace std {
 template<typename T>
 template<typename T>
-ZT_ALWAYS_INLINE void swap(ZeroTier::ScopedPtr<T> &a,ZeroTier::ScopedPtr<T> &b) { a.swap(b); }
+ZT_ALWAYS_INLINE void swap(ZeroTier::ScopedPtr<T> &a,ZeroTier::ScopedPtr<T> &b) noexcept { a.swap(b); }
 }
 }
 
 
 #endif
 #endif
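
Note: a tiny usage sketch of ScopedPtr as declared above: it owns the object for the enclosing scope, deletes it on exit, and cannot be copied or reassigned. The include path and the Widget type are assumptions for illustration:

#include "node/ScopedPtr.hpp"

#include <cstdio>

struct Widget { void hello() const { std::printf("hello\n"); } };

int main()
{
	ZeroTier::ScopedPtr<Widget> w(new Widget());
	w->hello();
	return 0; // Widget is deleted when w leaves scope
}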

+ 38 - 22
node/SharedPtr.hpp

@@ -15,7 +15,6 @@
 #define ZT_SHAREDPTR_HPP
 #define ZT_SHAREDPTR_HPP
 
 
 #include "Constants.hpp"
 #include "Constants.hpp"
-#include "AtomicCounter.hpp"
 #include "TriviallyCopyable.hpp"
 #include "TriviallyCopyable.hpp"
 
 
 namespace ZeroTier {
 namespace ZeroTier {
@@ -25,15 +24,19 @@ namespace ZeroTier {
  *
  *
  * This is an introspective shared pointer. Classes that need to be reference
  * This is an introspective shared pointer. Classes that need to be reference
  * counted must list this as a 'friend' and must have a private instance of
  * counted must list this as a 'friend' and must have a private instance of
- * AtomicCounter called __refCount.
+ * atomic<int> called __refCount.
+ *
+ * This is technically TriviallyCopyable but extreme care must be taken if
+ * one wishes to handle it in this manner. A memcpy must be followed by a
+ * memset of the source to 0 so as to achieve 'move' semantics.
  */
  */
 template<typename T>
 template<typename T>
 class SharedPtr : public TriviallyCopyable
 class SharedPtr : public TriviallyCopyable
 {
 {
 public:
 public:
-	ZT_ALWAYS_INLINE SharedPtr() : _ptr((T *)0) {}
-	explicit ZT_ALWAYS_INLINE SharedPtr(T *obj) : _ptr(obj) { ++obj->__refCount; }
-	ZT_ALWAYS_INLINE SharedPtr(const SharedPtr &sp) : _ptr(sp._getAndInc()) {}
+	ZT_ALWAYS_INLINE SharedPtr() noexcept : _ptr((T *)0) {}
+	explicit ZT_ALWAYS_INLINE SharedPtr(T *obj) noexcept : _ptr(obj) { ++obj->__refCount; }
+	ZT_ALWAYS_INLINE SharedPtr(const SharedPtr &sp) noexcept : _ptr(sp._getAndInc()) {}
 
 
 	ZT_ALWAYS_INLINE ~SharedPtr()
 	ZT_ALWAYS_INLINE ~SharedPtr()
 	{
 	{
@@ -64,19 +67,28 @@ public:
 	 *
 	 *
 	 * @param ptr Naked pointer to assign
 	 * @param ptr Naked pointer to assign
 	 */
 	 */
-	ZT_ALWAYS_INLINE void set(T *ptr)
+	ZT_ALWAYS_INLINE void set(T *ptr) noexcept
 	{
 	{
 		zero();
 		zero();
 		++ptr->__refCount;
 		++ptr->__refCount;
 		_ptr = ptr;
 		_ptr = ptr;
 	}
 	}
 
 
+	/**
+	 * Stupidly set this SharedPtr to 'ptr', ignoring current value and not incrementing reference counter
+	 *
+	 * This must only be used in code that knows what it's doing. :)
+	 *
+	 * @param ptr Pointer to set
+	 */
+	ZT_ALWAYS_INLINE void unsafeSet(T *ptr) noexcept { _ptr = ptr; }
+
 	/**
 	/**
 	 * Swap with another pointer 'for free' without ref count overhead
 	 * Swap with another pointer 'for free' without ref count overhead
 	 *
 	 *
 	 * @param with Pointer to swap with
 	 * @param with Pointer to swap with
 	 */
 	 */
-	ZT_ALWAYS_INLINE void swap(SharedPtr &with)
+	ZT_ALWAYS_INLINE void swap(SharedPtr &with) noexcept
 	{
 	{
 		T *tmp = _ptr;
 		T *tmp = _ptr;
 		_ptr = with._ptr;
 		_ptr = with._ptr;
@@ -84,7 +96,10 @@ public:
 	}
 	}
 
 
 	/**
 	/**
-	 * Set this value to one from another pointer and set that pointer to zero (avoids ref count changes)
+	 * Set this value to one from another pointer and set that pointer to zero (i.e. take ownership from it)
+	 *
+	 * This is faster than setting and zeroing the source pointer since it
+	 * avoids a synchronized reference count change.
 	 *
 	 *
 	 * @param from Origin pointer; will be zeroed
 	 * @param from Origin pointer; will be zeroed
 	 */
 	 */
@@ -98,14 +113,15 @@ public:
 		from._ptr = nullptr;
 		from._ptr = nullptr;
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE operator bool() const { return (_ptr != nullptr); }
-	ZT_ALWAYS_INLINE T &operator*() const { return *_ptr; }
-	ZT_ALWAYS_INLINE T *operator->() const { return _ptr; }
+	ZT_ALWAYS_INLINE operator bool() const noexcept { return (_ptr != nullptr); }
+
+	ZT_ALWAYS_INLINE T &operator*() const noexcept { return *_ptr; }
+	ZT_ALWAYS_INLINE T *operator->() const noexcept { return _ptr; }
 
 
 	/**
 	/**
 	 * @return Raw pointer to held object
 	 * @return Raw pointer to held object
 	 */
 	 */
-	ZT_ALWAYS_INLINE T *ptr() const { return _ptr; }
+	ZT_ALWAYS_INLINE T *ptr() const noexcept { return _ptr; }
 
 
 	/**
 	/**
 	 * Set this pointer to NULL
 	 * Set this pointer to NULL
@@ -122,22 +138,22 @@ public:
 	/**
 	/**
 	 * @return Number of references according to this object's ref count or 0 if NULL
 	 * @return Number of references according to this object's ref count or 0 if NULL
 	 */
 	 */
-	ZT_ALWAYS_INLINE int references()
+	ZT_ALWAYS_INLINE int references() noexcept
 	{
 	{
 		if (_ptr)
 		if (_ptr)
-			return _ptr->__refCount.load();
+			return _ptr->__refCount;
 		return 0;
 		return 0;
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE bool operator==(const SharedPtr &sp) const { return (_ptr == sp._ptr); }
-	ZT_ALWAYS_INLINE bool operator!=(const SharedPtr &sp) const { return (_ptr != sp._ptr); }
-	ZT_ALWAYS_INLINE bool operator>(const SharedPtr &sp) const { return (_ptr > sp._ptr); }
-	ZT_ALWAYS_INLINE bool operator<(const SharedPtr &sp) const { return (_ptr < sp._ptr); }
-	ZT_ALWAYS_INLINE bool operator>=(const SharedPtr &sp) const { return (_ptr >= sp._ptr); }
-	ZT_ALWAYS_INLINE bool operator<=(const SharedPtr &sp) const { return (_ptr <= sp._ptr); }
+	ZT_ALWAYS_INLINE bool operator==(const SharedPtr &sp) const noexcept { return (_ptr == sp._ptr); }
+	ZT_ALWAYS_INLINE bool operator!=(const SharedPtr &sp) const noexcept { return (_ptr != sp._ptr); }
+	ZT_ALWAYS_INLINE bool operator>(const SharedPtr &sp) const noexcept { return (_ptr > sp._ptr); }
+	ZT_ALWAYS_INLINE bool operator<(const SharedPtr &sp) const noexcept { return (_ptr < sp._ptr); }
+	ZT_ALWAYS_INLINE bool operator>=(const SharedPtr &sp) const noexcept { return (_ptr >= sp._ptr); }
+	ZT_ALWAYS_INLINE bool operator<=(const SharedPtr &sp) const noexcept { return (_ptr <= sp._ptr); }
 
 
 private:
 private:
-	ZT_ALWAYS_INLINE T *_getAndInc() const
+	ZT_ALWAYS_INLINE T *_getAndInc() const noexcept
 	{
 	{
 		if (_ptr)
 		if (_ptr)
 			++_ptr->__refCount;
 			++_ptr->__refCount;
@@ -150,7 +166,7 @@ private:
 
 
 namespace std {
 namespace std {
 template<typename T>
 template<typename T>
-ZT_ALWAYS_INLINE void swap(ZeroTier::SharedPtr<T> &a,ZeroTier::SharedPtr<T> &b) { a.swap(b); }
+ZT_ALWAYS_INLINE void swap(ZeroTier::SharedPtr<T> &a,ZeroTier::SharedPtr<T> &b) noexcept { a.swap(b); }
 }
 }
 
 
 #endif
 #endif
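
Note: per the class comment, a reference-counted type must befriend SharedPtr and hold a private std::atomic<int> named __refCount. A minimal sketch of that contract, assuming the ZeroTier headers are on the include path; the Thing class is illustrative and is placed in the ZeroTier namespace only so the friend declaration reads the same way it does for Peer:

#include "node/SharedPtr.hpp"

#include <atomic>
#include <cstdio>

namespace ZeroTier {

// Minimal reference-counted class per the contract above: it befriends
// SharedPtr and carries a private std::atomic<int> named __refCount.
class Thing
{
public:
	Thing() : __refCount(0) {}
private:
	friend class SharedPtr<Thing>;
	std::atomic<int> __refCount;
};

} // namespace ZeroTier

int main()
{
	ZeroTier::SharedPtr<ZeroTier::Thing> a(new ZeroTier::Thing());
	ZeroTier::SharedPtr<ZeroTier::Thing> b(a); // copy bumps __refCount
	std::printf("references: %d\n",a.references());
	return 0; // both SharedPtrs release; the last one deletes the Thing
}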

+ 3 - 3
node/Tag.cpp

@@ -15,7 +15,7 @@
 
 
 namespace ZeroTier {
 namespace ZeroTier {
 
 
-bool Tag::sign(const Identity &signer)
+bool Tag::sign(const Identity &signer) noexcept
 {
 {
 	uint8_t buf[ZT_TAG_MARSHAL_SIZE_MAX];
 	uint8_t buf[ZT_TAG_MARSHAL_SIZE_MAX];
 	if (signer.hasPrivate()) {
 	if (signer.hasPrivate()) {
@@ -26,7 +26,7 @@ bool Tag::sign(const Identity &signer)
 	return false;
 	return false;
 }
 }
 
 
-int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign) const
+int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign) const noexcept
 {
 {
 	int p = 0;
 	int p = 0;
 	if (forSign) {
 	if (forSign) {
@@ -54,7 +54,7 @@ int Tag::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign) const
 	return p;
 	return p;
 }
 }
 
 
-int Tag::unmarshal(const uint8_t *data,int len)
+int Tag::unmarshal(const uint8_t *data,int len) noexcept
 {
 {
 	if (len < 37)
 	if (len < 37)
 		return -1;
 		return -1;

+ 28 - 28
node/Tag.hpp

@@ -53,9 +53,9 @@ class Tag : public Credential
 	friend class Credential;
 	friend class Credential;
 
 
 public:
 public:
-	static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_TAG; }
+	static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_TAG; }
 
 
-	ZT_ALWAYS_INLINE Tag() { memoryZero(this); }
+	ZT_ALWAYS_INLINE Tag() noexcept { memoryZero(this); }
 
 
 	/**
 	/**
 	 * @param nwid Network ID
 	 * @param nwid Network ID
@@ -64,7 +64,7 @@ public:
 	 * @param id Tag ID
 	 * @param id Tag ID
 	 * @param value Tag value
 	 * @param value Tag value
 	 */
 	 */
-	ZT_ALWAYS_INLINE Tag(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id,const uint32_t value) :
+	ZT_ALWAYS_INLINE Tag(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id,const uint32_t value) noexcept :
 		_id(id),
 		_id(id),
 		_value(value),
 		_value(value),
 		_networkId(nwid),
 		_networkId(nwid),
@@ -75,14 +75,14 @@ public:
 	{
 	{
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE uint32_t id() const { return _id; }
-	ZT_ALWAYS_INLINE const uint32_t &value() const { return _value; }
-	ZT_ALWAYS_INLINE uint64_t networkId() const { return _networkId; }
-	ZT_ALWAYS_INLINE int64_t timestamp() const { return _ts; }
-	ZT_ALWAYS_INLINE const Address &issuedTo() const { return _issuedTo; }
-	ZT_ALWAYS_INLINE const Address &signer() const { return _signedBy; }
-	ZT_ALWAYS_INLINE const uint8_t *signature() const { return _signature; }
-	ZT_ALWAYS_INLINE unsigned int signatureLength() const { return _signatureLength; }
+	ZT_ALWAYS_INLINE uint32_t id() const noexcept { return _id; }
+	ZT_ALWAYS_INLINE const uint32_t &value() const noexcept { return _value; }
+	ZT_ALWAYS_INLINE uint64_t networkId() const noexcept { return _networkId; }
+	ZT_ALWAYS_INLINE int64_t timestamp() const noexcept { return _ts; }
+	ZT_ALWAYS_INLINE const Address &issuedTo() const noexcept { return _issuedTo; }
+	ZT_ALWAYS_INLINE const Address &signer() const noexcept { return _signedBy; }
+	ZT_ALWAYS_INLINE const uint8_t *signature() const noexcept { return _signature; }
+	ZT_ALWAYS_INLINE unsigned int signatureLength() const noexcept { return _signatureLength; }
 
 
 	/**
 	/**
 	 * Sign this tag
 	 * Sign this tag
@@ -90,7 +90,7 @@ public:
 	 * @param signer Signing identity, must have private key
 	 * @param signer Signing identity, must have private key
 	 * @return True if signature was successful
 	 * @return True if signature was successful
 	 */
 	 */
-	bool sign(const Identity &signer);
+	bool sign(const Identity &signer) noexcept;
 
 
 	/**
 	/**
 	 * Check this tag's signature
 	 * Check this tag's signature
@@ -98,30 +98,30 @@ public:
 	 * @param RR Runtime environment to allow identity lookup for signedBy
 	 * @param RR Runtime environment to allow identity lookup for signedBy
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 */
 	 */
-	ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const { return _verify(RR,tPtr,*this); }
+	ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const noexcept { return _verify(RR,tPtr,*this); }
 
 
-	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_TAG_MARSHAL_SIZE_MAX; }
-	int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign = false) const;
-	int unmarshal(const uint8_t *data,int len);
+	static constexpr int marshalSizeMax() noexcept { return ZT_TAG_MARSHAL_SIZE_MAX; }
+	int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX],bool forSign = false) const noexcept;
+	int unmarshal(const uint8_t *data,int len) noexcept;
 
 
 	// Provides natural sort order by ID
 	// Provides natural sort order by ID
-	ZT_ALWAYS_INLINE bool operator<(const Tag &t) const { return (_id < t._id); }
+	ZT_ALWAYS_INLINE bool operator<(const Tag &t) const noexcept { return (_id < t._id); }
 
 
-	ZT_ALWAYS_INLINE bool operator==(const Tag &t) const { return (memcmp(this,&t,sizeof(Tag)) == 0); }
-	ZT_ALWAYS_INLINE bool operator!=(const Tag &t) const { return (memcmp(this,&t,sizeof(Tag)) != 0); }
+	ZT_ALWAYS_INLINE bool operator==(const Tag &t) const noexcept { return (memcmp(this,&t,sizeof(Tag)) == 0); }
+	ZT_ALWAYS_INLINE bool operator!=(const Tag &t) const noexcept { return (memcmp(this,&t,sizeof(Tag)) != 0); }
 
 
 	// For searching sorted arrays or lists of Tags by ID
 	// For searching sorted arrays or lists of Tags by ID
 	struct IdComparePredicate
 	struct IdComparePredicate
 	{
 	{
-		ZT_ALWAYS_INLINE bool operator()(const Tag &a,const Tag &b) const { return (a.id() < b.id()); }
-		ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const Tag &b) const { return (a < b.id()); }
-		ZT_ALWAYS_INLINE bool operator()(const Tag &a,const uint32_t b) const { return (a.id() < b); }
-		ZT_ALWAYS_INLINE bool operator()(const Tag *a,const Tag *b) const { return (a->id() < b->id()); }
-		ZT_ALWAYS_INLINE bool operator()(const Tag *a,const Tag &b) const { return (a->id() < b.id()); }
-		ZT_ALWAYS_INLINE bool operator()(const Tag &a,const Tag *b) const { return (a.id() < b->id()); }
-		ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const Tag *b) const { return (a < b->id()); }
-		ZT_ALWAYS_INLINE bool operator()(const Tag *a,const uint32_t b) const { return (a->id() < b); }
-		ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const uint32_t b) const { return (a < b); }
+		ZT_ALWAYS_INLINE bool operator()(const Tag &a,const Tag &b) const noexcept { return (a.id() < b.id()); }
+		ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const Tag &b) const noexcept { return (a < b.id()); }
+		ZT_ALWAYS_INLINE bool operator()(const Tag &a,const uint32_t b) const noexcept { return (a.id() < b); }
+		ZT_ALWAYS_INLINE bool operator()(const Tag *a,const Tag *b) const noexcept { return (a->id() < b->id()); }
+		ZT_ALWAYS_INLINE bool operator()(const Tag *a,const Tag &b) const noexcept { return (a->id() < b.id()); }
+		ZT_ALWAYS_INLINE bool operator()(const Tag &a,const Tag *b) const noexcept { return (a.id() < b->id()); }
+		ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const Tag *b) const noexcept { return (a < b->id()); }
+		ZT_ALWAYS_INLINE bool operator()(const Tag *a,const uint32_t b) const noexcept { return (a->id() < b); }
+		ZT_ALWAYS_INLINE bool operator()(const uint32_t a,const uint32_t b) const noexcept { return (a < b); }
 	};
 	};
 
 
 private:
 private:
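
Note: IdComparePredicate's overloads let sorted containers of tags be searched directly by numeric ID. A standalone sketch of that heterogeneous-comparator pattern with a toy Item type (illustrative, not the real Tag):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// One comparator with object/object and id/object overloads, so a sorted
// vector can be probed by ID without constructing a full object.
struct Item { uint32_t _id; uint32_t id() const { return _id; } };

struct IdCompare
{
	bool operator()(const Item &a,const Item &b) const { return a.id() < b.id(); }
	bool operator()(uint32_t a,const Item &b) const { return a < b.id(); }
	bool operator()(const Item &a,uint32_t b) const { return a.id() < b; }
};

int main()
{
	std::vector<Item> items{{30},{10},{20}};
	std::sort(items.begin(),items.end(),IdCompare());
	const bool found = std::binary_search(items.begin(),items.end(),20U,IdCompare());
	std::printf("id 20 %s\n",found ? "found" : "missing");
	return 0;
}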

+ 29 - 17
node/Topology.cpp

@@ -75,13 +75,20 @@ Topology::~Topology()
 SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
 SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
 {
 {
 	RWMutex::Lock _l(_peers_l);
 	RWMutex::Lock _l(_peers_l);
+
 	SharedPtr<Peer> &hp = _peers[peer->address()];
 	SharedPtr<Peer> &hp = _peers[peer->address()];
 	if (hp)
 	if (hp)
 		return hp;
 		return hp;
+
 	_loadCached(tPtr,peer->address(),hp);
 	_loadCached(tPtr,peer->address(),hp);
-	if (hp)
+	if (hp) {
+		_peersByIncomingProbe[peer->incomingProbe()] = hp;
 		return hp;
 		return hp;
+	}
+
 	hp = peer;
 	hp = peer;
+	_peersByIncomingProbe[peer->incomingProbe()] = peer;
+
 	return peer;
 	return peer;
 }
 }
 
 
@@ -196,6 +203,7 @@ void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
 		while (i.next(a,p)) {
 		while (i.next(a,p)) {
 			if ( (!(*p)->alive(now)) && (_roots.count((*p)->identity()) == 0) ) {
 			if ( (!(*p)->alive(now)) && (_roots.count((*p)->identity()) == 0) ) {
 				(*p)->save(tPtr);
 				(*p)->save(tPtr);
+				_peersByIncomingProbe.erase((*p)->incomingProbe());
 				_peers.erase(*a);
 				_peers.erase(*a);
 			}
 			}
 		}
 		}
@@ -227,24 +235,28 @@ void Topology::saveAll(void *tPtr)
 
 
 void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
 void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
 {
 {
-	uint64_t id[2];
-	id[0] = zta.toInt();
-	id[1] = 0;
-	std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_PEER,id));
-	if (!data.empty()) {
-		const uint8_t *d = data.data();
-		int dl = (int)data.size();
-		for(;;) {
-			Peer *const p = new Peer(RR);
-			int n = p->unmarshal(d,dl);
-			if (n > 0) {
-				// TODO: will eventually handle multiple peers
-				peer.set(p);
-				return;
-			} else {
-				delete p;
+	try {
+		uint64_t id[2];
+		id[0] = zta.toInt();
+		id[1] = 0;
+		std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_PEER,id));
+		if (!data.empty()) {
+			const uint8_t *d = data.data();
+			int dl = (int)data.size();
+			for (;;) {
+				Peer *const p = new Peer(RR);
+				int n = p->unmarshal(d,dl);
+				if (n > 0) {
+					// TODO: will eventually handle multiple peers
+					peer.set(p);
+					return;
+				} else {
+					delete p;
+				}
 			}
 			}
 		}
 		}
+	} catch ( ... ) {
+		peer.zero();
 	}
 	}
 }
 }
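
Note: Topology::add() and doPeriodicTasks() now maintain two indexes over the same peer objects, one by address and one by incoming probe, and both must be updated together. A standalone sketch of that dual-index pattern using standard containers (the Peer struct and all values here are illustrative placeholders):

#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Illustrative dual index: the same shared peer object is reachable both by
// its address and by its incoming probe value.
struct Peer { std::string address; uint64_t incomingProbe; };

int main()
{
	std::unordered_map<std::string,std::shared_ptr<Peer> > byAddress;
	std::unordered_map<uint64_t,std::shared_ptr<Peer> > byIncomingProbe;

	auto p = std::make_shared<Peer>(Peer{"89e92ceee5",0x1122334455667788ULL});
	byAddress[p->address] = p;
	byIncomingProbe[p->incomingProbe] = p;

	auto it = byIncomingProbe.find(0x1122334455667788ULL);
	if (it != byIncomingProbe.end())
		std::cout << "probe maps back to " << it->second->address << std::endl;

	// On expiration both indexes must be cleared together, as in doPeriodicTasks().
	byIncomingProbe.erase(p->incomingProbe);
	byAddress.erase(p->address);
	return 0;
}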
 
 

+ 33 - 19
node/Topology.hpp

@@ -60,9 +60,10 @@ public:
 	 *
 	 *
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param zta ZeroTier address of peer
 	 * @param zta ZeroTier address of peer
+	 * @param loadFromCached If false, do not load from the cache when the peer is not already in memory (default: true)
 	 * @return Peer or NULL if not found
 	 * @return Peer or NULL if not found
 	 */
 	 */
-	ZT_ALWAYS_INLINE SharedPtr<Peer> get(void *tPtr,const Address &zta)
+	ZT_ALWAYS_INLINE SharedPtr<Peer> peer(void *tPtr,const Address &zta,const bool loadFromCached = true)
 	{
 	{
 		{
 		{
 			RWMutex::RLock _l(_peers_l);
 			RWMutex::RLock _l(_peers_l);
@@ -72,17 +73,34 @@ public:
 		}
 		}
 
 
 		SharedPtr<Peer> p;
 		SharedPtr<Peer> p;
-		_loadCached(tPtr,zta,p);
-		if (p) {
-			RWMutex::Lock _l(_peers_l);
-			SharedPtr<Peer> &hp = _peers[zta];
-			if (!hp)
-				hp = p;
+		if (loadFromCached) {
+			_loadCached(tPtr,zta,p);
+			if (p) {
+				RWMutex::Lock _l(_peers_l);
+				SharedPtr<Peer> &hp = _peers[zta];
+				if (!hp)
+					hp = p;
+			}
 		}
 		}
 
 
 		return p;
 		return p;
 	}
 	}
 
 
+	/**
+	 * Get a peer by its incoming short probe packet payload
+	 *
+	 * @param probe Short probe payload (in big-endian byte order)
+	 * @return Peer or NULL if no peer is currently in memory matching this probe (cache is not checked in this case)
+	 */
+	ZT_ALWAYS_INLINE SharedPtr<Peer> peerByProbe(const uint64_t probe)
+	{
+		RWMutex::RLock _l(_peers_l);
+		const SharedPtr<Peer> *const ap = _peersByIncomingProbe.get(probe);
+		if (ap)
+			return *ap;
+		return SharedPtr<Peer>();
+	}
+
 	/**
 	/**
 	 * Get a Path object for a given local and remote physical address, creating if needed
 	 * Get a Path object for a given local and remote physical address, creating if needed
 	 *
 	 *
@@ -90,7 +108,7 @@ public:
 	 * @param r Remote address
 	 * @param r Remote address
 	 * @return Pointer to canonicalized Path object or NULL on error
 	 * @return Pointer to canonicalized Path object or NULL on error
 	 */
 	 */
-	ZT_ALWAYS_INLINE SharedPtr<Path> getPath(const int64_t l,const InetAddress &r)
+	ZT_ALWAYS_INLINE SharedPtr<Path> path(const int64_t l,const InetAddress &r)
 	{
 	{
 		const uint64_t k = _pathHash(l,r);
 		const uint64_t k = _pathHash(l,r);
 
 
@@ -174,25 +192,20 @@ public:
 	{
 	{
 		RWMutex::RLock l(_peers_l);
 		RWMutex::RLock l(_peers_l);
 
 
-		const unsigned long rootPeerCnt = _rootPeers.size();
-		uintptr_t *const rootPeerPtrs = (uintptr_t *)malloc(sizeof(uintptr_t) * rootPeerCnt);
-		if (!rootPeerPtrs)
-			throw std::bad_alloc();
-		for(unsigned long i=0;i<rootPeerCnt;++i)
-			rootPeerPtrs[i] = (uintptr_t)_rootPeers[i].ptr();
-		std::sort(rootPeerPtrs,rootPeerPtrs + rootPeerCnt);
-		uintptr_t *const rootPeerPtrsEnd = rootPeerPtrs + rootPeerCnt;
+		std::vector<uintptr_t> rootPeerPtrs;
+		rootPeerPtrs.reserve(_rootPeers.size());
+		for(std::vector< SharedPtr<Peer> >::const_iterator rp(_rootPeers.begin());rp!=_rootPeers.end();++rp)
+			rootPeerPtrs.push_back((uintptr_t)rp->ptr());
+		std::sort(rootPeerPtrs.begin(),rootPeerPtrs.end());
 
 
 		try {
 		try {
 			Hashtable< Address,SharedPtr<Peer> >::Iterator i(const_cast<Topology *>(this)->_peers);
 			Hashtable< Address,SharedPtr<Peer> >::Iterator i(const_cast<Topology *>(this)->_peers);
 			Address *a = nullptr;
 			Address *a = nullptr;
 			SharedPtr<Peer> *p = nullptr;
 			SharedPtr<Peer> *p = nullptr;
 			while (i.next(a,p)) {
 			while (i.next(a,p)) {
-				f(*((const SharedPtr<Peer> *)p),std::binary_search(rootPeerPtrs,rootPeerPtrsEnd,(uintptr_t)p->ptr()));
+				f(*((const SharedPtr<Peer> *)p),std::binary_search(rootPeerPtrs.begin(),rootPeerPtrs.end(),(uintptr_t)p->ptr()));
 			}
 			}
 		} catch ( ... ) {} // should not throw
 		} catch ( ... ) {} // should not throw
-
-		free((void *)rootPeerPtrs);
 	}
 	}
 
 
 	/**
 	/**
@@ -345,6 +358,7 @@ private:
 	unsigned int _numConfiguredPhysicalPaths;
 	unsigned int _numConfiguredPhysicalPaths;
 
 
 	Hashtable< Address,SharedPtr<Peer> > _peers;
 	Hashtable< Address,SharedPtr<Peer> > _peers;
+	Hashtable< uint64_t,SharedPtr<Peer> > _peersByIncomingProbe;
 	Hashtable< uint64_t,SharedPtr<Path> > _paths;
 	Hashtable< uint64_t,SharedPtr<Path> > _paths;
 	std::set< Identity > _roots; // locked by _peers_l
 	std::set< Identity > _roots; // locked by _peers_l
 	std::vector< SharedPtr<Peer> > _rootPeers; // locked by _peers_l
 	std::vector< SharedPtr<Peer> > _rootPeers; // locked by _peers_l
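
Note: the peer-iteration helper above flags root peers by collecting their raw pointers into a sorted vector and probing it with std::binary_search while walking all peers. The same membership test in a standalone form (the objects here are plain ints for illustration):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
	int objects[4] = {1,2,3,4};
	std::vector<int *> all{&objects[0],&objects[1],&objects[2],&objects[3]};
	std::vector<int *> roots{&objects[2]};

	// Collect and sort the "root" pointers once...
	std::vector<std::uintptr_t> rootPtrs;
	rootPtrs.reserve(roots.size());
	for (int *r : roots)
		rootPtrs.push_back((std::uintptr_t)r);
	std::sort(rootPtrs.begin(),rootPtrs.end());

	// ...then test membership in O(log n) for each visited object.
	for (int *p : all) {
		const bool isRoot = std::binary_search(rootPtrs.begin(),rootPtrs.end(),(std::uintptr_t)p);
		std::printf("object %d is%s a root\n",*p,isRoot ? "" : " not");
	}
	return 0;
}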

+ 30 - 0
node/Trace.cpp

@@ -15,6 +15,8 @@
 #include "RuntimeEnvironment.hpp"
 #include "RuntimeEnvironment.hpp"
 #include "Node.hpp"
 #include "Node.hpp"
 #include "Peer.hpp"
 #include "Peer.hpp"
+#include "Path.hpp"
+#include "InetAddress.hpp"
 
 
 #include <cstdio>
 #include <cstdio>
 #include <cstdlib>
 #include <cstdlib>
@@ -33,6 +35,34 @@ Trace::Trace(const RuntimeEnvironment *renv) :
 {
 {
 }
 }
 
 
+Trace::Str<ZT_INETADDRESS_STRING_SIZE_MAX> Trace::str(const InetAddress &a,const bool ipOnly)
+{
+	Str<ZT_INETADDRESS_STRING_SIZE_MAX> s;
+	if (ipOnly)
+		a.toIpString(s.s);
+	else a.toString(s.s);
+	return s;
+}
+
+Trace::Str<ZT_ADDRESS_STRING_SIZE_MAX> Trace::str(const Address &a)
+{
+	Str<ZT_ADDRESS_STRING_SIZE_MAX> s;
+	a.toString(s.s);
+	return s;
+}
+
+Trace::Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> Trace::str(const Address &peerAddress,const SharedPtr<Path> &path)
+{
+	Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> s;
+	peerAddress.toString(s.s);
+	s.s[10] = '(';
+	path->address().toString(s.s + 11);
+	const int x = (int)strlen(s.s);
+	s.s[x] = ')';
+	s.s[x+1] = 0;
+	return s;
+}
+
 void Trace::unexpectedError(
 void Trace::unexpectedError(
 	void *tPtr,
 	void *tPtr,
 	uint32_t codeLocation,
 	uint32_t codeLocation,

+ 32 - 5
node/Trace.hpp

@@ -14,11 +14,6 @@
 #ifndef ZT_TRACE_HPP
 #ifndef ZT_TRACE_HPP
 #define ZT_TRACE_HPP
 #define ZT_TRACE_HPP
 
 
-#include <cstdint>
-#include <cstring>
-#include <cstdlib>
-#include <vector>
-
 #include "Constants.hpp"
 #include "Constants.hpp"
 #include "SharedPtr.hpp"
 #include "SharedPtr.hpp"
 #include "Mutex.hpp"
 #include "Mutex.hpp"
@@ -26,6 +21,11 @@
 #include "Address.hpp"
 #include "Address.hpp"
 #include "MAC.hpp"
 #include "MAC.hpp"
 
 
+#include <cstdint>
+#include <cstring>
+#include <cstdlib>
+#include <vector>
+
 namespace ZeroTier {
 namespace ZeroTier {
 
 
 class RuntimeEnvironment;
 class RuntimeEnvironment;
@@ -42,6 +42,16 @@ struct NetworkConfig;
 
 
 /**
 /**
  * Remote tracing and trace logging handler
  * Remote tracing and trace logging handler
+ *
+ * These methods are called when things happen that may be of interested to
+ * someone debugging ZeroTier or its virtual networks. The codeLocation parameter
+ * is an arbitrary pseudo-random identifier of the form 0xNNNNNNNN that could be
+ * easily found by searching the code base. This makes it easy to locate the
+ * specific line where a trace originated without relying on brittle non-portable
+ * things like source file and line number. The same identifier should be used
+ * for the same 'place' in the code across versions. These could eventually be
+ * turned into constants that are semi-official and stored in a database to
+ * provide extra debug context.
  */
  */
 class Trace
 class Trace
 {
 {
@@ -64,8 +74,25 @@ public:
 		}
 		}
 	};
 	};
 
 
+	/**
+	 * Simple container for a C string
+	 *
+	 * @tparam C Capacity of string
+	 */
+	template<unsigned int C>
+	struct Str
+	{
+		ZT_ALWAYS_INLINE Str() { memset(s,0,sizeof(s)); }
+		constexpr static unsigned int capacity() { return C; }
+		char s[C];
+	};
+
 	explicit Trace(const RuntimeEnvironment *renv);
 	explicit Trace(const RuntimeEnvironment *renv);
 
 
+	static Str<ZT_INETADDRESS_STRING_SIZE_MAX> str(const InetAddress &a,bool ipOnly = false);
+	static Str<ZT_ADDRESS_STRING_SIZE_MAX> str(const Address &a);
+	static Str<ZT_ADDRESS_STRING_SIZE_MAX + ZT_INETADDRESS_STRING_SIZE_MAX + 4> str(const Address &peerAddress,const SharedPtr<Path> &path);
+
 	void unexpectedError(
 	void unexpectedError(
 		void *tPtr,
 		void *tPtr,
 		uint32_t codeLocation,
 		uint32_t codeLocation,

+ 10 - 10
node/TriviallyCopyable.hpp

@@ -39,7 +39,7 @@ public:
 	 * @param obj Any TriviallyCopyable object
 	 * @param obj Any TriviallyCopyable object
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryBurn(T *obj)
+	static ZT_ALWAYS_INLINE void memoryBurn(T *obj) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = obj;
 		TriviallyCopyable *const tmp = obj;
 		Utils::burn(tmp,sizeof(T));
 		Utils::burn(tmp,sizeof(T));
@@ -52,7 +52,7 @@ public:
 	 * @param obj Any TriviallyCopyable object
 	 * @param obj Any TriviallyCopyable object
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryBurn(T &obj)
+	static ZT_ALWAYS_INLINE void memoryBurn(T &obj) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = &obj;
 		TriviallyCopyable *const tmp = &obj;
 		Utils::burn(tmp,sizeof(T));
 		Utils::burn(tmp,sizeof(T));
@@ -65,7 +65,7 @@ public:
 	 * @param obj Any TriviallyCopyable object
 	 * @param obj Any TriviallyCopyable object
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryZero(T *obj)
+	static ZT_ALWAYS_INLINE void memoryZero(T *obj) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = obj;
 		TriviallyCopyable *const tmp = obj;
 		memset(tmp,0,sizeof(T));
 		memset(tmp,0,sizeof(T));
@@ -78,7 +78,7 @@ public:
 	 * @param obj Any TriviallyCopyable object
 	 * @param obj Any TriviallyCopyable object
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryZero(T &obj)
+	static ZT_ALWAYS_INLINE void memoryZero(T &obj) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = &obj;
 		TriviallyCopyable *const tmp = &obj;
 		memset(tmp,0,sizeof(T));
 		memset(tmp,0,sizeof(T));
@@ -92,7 +92,7 @@ public:
 	 * @param src Source memory of same size or less than sizeof(dest)
 	 * @param src Source memory of same size or less than sizeof(dest)
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T *dest,const void *src)
+	static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T *dest,const void *src) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = dest;
 		TriviallyCopyable *const tmp = dest;
 		memcpy(tmp,src,sizeof(T));
 		memcpy(tmp,src,sizeof(T));
@@ -106,7 +106,7 @@ public:
 	 * @param src Source memory of same size or less than sizeof(dest)
 	 * @param src Source memory of same size or less than sizeof(dest)
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T &dest,const void *src)
+	static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T &dest,const void *src) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = &dest;
 		TriviallyCopyable *const tmp = &dest;
 		memcpy(tmp,src,sizeof(T));
 		memcpy(tmp,src,sizeof(T));
@@ -120,7 +120,7 @@ public:
 	 * @param src Source TriviallyCopyable object
 	 * @param src Source TriviallyCopyable object
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T *src)
+	static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T *src) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = dest;
 		TriviallyCopyable *const tmp = dest;
 		memcpy(tmp,src,sizeof(T));
 		memcpy(tmp,src,sizeof(T));
@@ -134,7 +134,7 @@ public:
 	 * @param src Source TriviallyCopyable object
 	 * @param src Source TriviallyCopyable object
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T &src)
+	static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T &src) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = dest;
 		TriviallyCopyable *const tmp = dest;
 		memcpy(tmp,&src,sizeof(T));
 		memcpy(tmp,&src,sizeof(T));
@@ -148,7 +148,7 @@ public:
 	 * @param src Source TriviallyCopyable object
 	 * @param src Source TriviallyCopyable object
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T *src)
+	static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T *src) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = &dest;
 		TriviallyCopyable *const tmp = &dest;
 		memcpy(tmp,src,sizeof(T));
 		memcpy(tmp,src,sizeof(T));
@@ -162,7 +162,7 @@ public:
 	 * @param src Source TriviallyCopyable object
 	 * @param src Source TriviallyCopyable object
 	 */
 	 */
 	template<typename T>
 	template<typename T>
-	static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T &src)
+	static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T &src) noexcept
 	{
 	{
 		TriviallyCopyable *const tmp = &dest;
 		TriviallyCopyable *const tmp = &dest;
 		memcpy(tmp,&src,sizeof(T));
 		memcpy(tmp,&src,sizeof(T));
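
Note: the helpers above confine raw memset/memcpy to types that opt in by deriving from TriviallyCopyable. A short usage sketch, assuming the ZeroTier source tree is on the include path (the Blob struct is illustrative):

#include "node/TriviallyCopyable.hpp"

#include <cstdint>
#include <cstdio>

// A small POD derived from TriviallyCopyable so the checked helpers accept it;
// passing a non-derived type would fail to compile.
struct Blob : public ZeroTier::TriviallyCopyable
{
	uint8_t data[16];
	uint32_t length;
};

int main()
{
	Blob a,b;
	ZeroTier::TriviallyCopyable::memoryZero(&a);  // memset restricted to opted-in types
	a.data[0] = 0x7f;
	a.length = 1;
	ZeroTier::TriviallyCopyable::memoryCopy(b,a); // memcpy restricted to opted-in types
	std::printf("%u %02x\n",(unsigned)b.length,(unsigned)b.data[0]);
	return 0;
}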

+ 16 - 16
node/Utils.cpp

@@ -60,7 +60,7 @@ CPUIDRegisters CPUID;
 const uint64_t ZERO256[4] = { 0,0,0,0 };
 const uint64_t ZERO256[4] = { 0,0,0,0 };
 const char HEXCHARS[16] = { '0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f' };
 const char HEXCHARS[16] = { '0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f' };
 
 
-bool secureEq(const void *a,const void *b,unsigned int len)
+bool secureEq(const void *a,const void *b,unsigned int len) noexcept
 {
 {
 	uint8_t diff = 0;
 	uint8_t diff = 0;
 	for(unsigned int i=0;i<len;++i)
 	for(unsigned int i=0;i<len;++i)
@@ -87,7 +87,7 @@ static unsigned long _Utils_itoa(unsigned long n,char *s)
 	s[pos] = (char)('0' + (n % 10));
 	s[pos] = (char)('0' + (n % 10));
 	return pos + 1;
 	return pos + 1;
 }
 }
-char *decimal(unsigned long n,char s[24])
+char *decimal(unsigned long n,char s[24]) noexcept
 {
 {
 	if (n == 0) {
 	if (n == 0) {
 		s[0] = '0';
 		s[0] = '0';
@@ -98,7 +98,7 @@ char *decimal(unsigned long n,char s[24])
 	return s;
 	return s;
 }
 }
 
 
-char *hex(uint8_t i,char s[3])
+char *hex(uint8_t i,char s[3]) noexcept
 {
 {
 	s[0] = HEXCHARS[(i >> 4U) & 0xfU];
 	s[0] = HEXCHARS[(i >> 4U) & 0xfU];
 	s[1] = HEXCHARS[i & 0xfU];
 	s[1] = HEXCHARS[i & 0xfU];
@@ -106,7 +106,7 @@ char *hex(uint8_t i,char s[3])
 	return s;
 	return s;
 }
 }
 
 
-char *hex(uint16_t i,char s[5])
+char *hex(uint16_t i,char s[5]) noexcept
 {
 {
 	s[0] = HEXCHARS[(i >> 12U) & 0xfU];
 	s[0] = HEXCHARS[(i >> 12U) & 0xfU];
 	s[1] = HEXCHARS[(i >> 8U) & 0xfU];
 	s[1] = HEXCHARS[(i >> 8U) & 0xfU];
@@ -116,7 +116,7 @@ char *hex(uint16_t i,char s[5])
 	return s;
 	return s;
 }
 }
 
 
-char *hex(uint32_t i,char s[9])
+char *hex(uint32_t i,char s[9]) noexcept
 {
 {
 	s[0] = HEXCHARS[(i >> 28U) & 0xfU];
 	s[0] = HEXCHARS[(i >> 28U) & 0xfU];
 	s[1] = HEXCHARS[(i >> 24U) & 0xfU];
 	s[1] = HEXCHARS[(i >> 24U) & 0xfU];
@@ -130,7 +130,7 @@ char *hex(uint32_t i,char s[9])
 	return s;
 	return s;
 }
 }
 
 
-char *hex(uint64_t i,char s[17])
+char *hex(uint64_t i,char s[17]) noexcept
 {
 {
 	s[0] = HEXCHARS[(i >> 60U) & 0xfU];
 	s[0] = HEXCHARS[(i >> 60U) & 0xfU];
 	s[1] = HEXCHARS[(i >> 56U) & 0xfU];
 	s[1] = HEXCHARS[(i >> 56U) & 0xfU];
@@ -152,7 +152,7 @@ char *hex(uint64_t i,char s[17])
 	return s;
 	return s;
 }
 }
 
 
-uint64_t unhex(const char *s)
+uint64_t unhex(const char *s) noexcept
 {
 {
 	uint64_t n = 0;
 	uint64_t n = 0;
 	if (s) {
 	if (s) {
@@ -177,7 +177,7 @@ uint64_t unhex(const char *s)
 	return n;
 	return n;
 }
 }
 
 
-char *hex10(uint64_t i,char s[11])
+char *hex10(uint64_t i,char s[11]) noexcept
 {
 {
 	s[0] = HEXCHARS[(i >> 36U) & 0xfU];
 	s[0] = HEXCHARS[(i >> 36U) & 0xfU];
 	s[1] = HEXCHARS[(i >> 32U) & 0xfU];
 	s[1] = HEXCHARS[(i >> 32U) & 0xfU];
@@ -193,7 +193,7 @@ char *hex10(uint64_t i,char s[11])
 	return s;
 	return s;
 }
 }
 
 
-char *hex(const void *d,unsigned int l,char *s)
+char *hex(const void *d,unsigned int l,char *s) noexcept
 {
 {
 	char *const save = s;
 	char *const save = s;
 	for(unsigned int i=0;i<l;++i) {
 	for(unsigned int i=0;i<l;++i) {
@@ -205,7 +205,7 @@ char *hex(const void *d,unsigned int l,char *s)
 	return save;
 	return save;
 }
 }
 
 
-unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen)
+unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen) noexcept
 {
 {
 	unsigned int l = 0;
 	unsigned int l = 0;
 	const char *hend = h + hlen;
 	const char *hend = h + hlen;
@@ -239,7 +239,7 @@ unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen
 	return l;
 	return l;
 }
 }
 
 
-void getSecureRandom(void *buf,unsigned int bytes)
+void getSecureRandom(void *buf,unsigned int bytes) noexcept
 {
 {
 	static Mutex globalLock;
 	static Mutex globalLock;
 	static bool initialized = false;
 	static bool initialized = false;
@@ -322,14 +322,14 @@ void getSecureRandom(void *buf,unsigned int bytes)
 	}
 	}
 }
 }
 
 
-uint64_t getSecureRandomU64()
+uint64_t getSecureRandomU64() noexcept
 {
 {
 	uint64_t tmp = 0;
 	uint64_t tmp = 0;
 	getSecureRandom(&tmp,sizeof(tmp));
 	getSecureRandom(&tmp,sizeof(tmp));
 	return tmp;
 	return tmp;
 }
 }
 
 
-int b32e(const uint8_t *data,int length,char *result,int bufSize)
+int b32e(const uint8_t *data,int length,char *result,int bufSize) noexcept
 {
 {
   if (length < 0 || length > (1 << 28)) {
   if (length < 0 || length > (1 << 28)) {
 		result[0] = (char)0;
 		result[0] = (char)0;
@@ -365,7 +365,7 @@ int b32e(const uint8_t *data,int length,char *result,int bufSize)
 	return -1;
 	return -1;
 }
 }
 
 
-int b32d(const char *encoded,uint8_t *result,int bufSize)
+int b32d(const char *encoded,uint8_t *result,int bufSize) noexcept
 {
 {
   int buffer = 0;
   int buffer = 0;
   int bitsLeft = 0;
   int bitsLeft = 0;
@@ -406,7 +406,7 @@ int b32d(const char *encoded,uint8_t *result,int bufSize)
 }
 }
 
 
 #define ROL64(x,k) (((x) << (k)) | ((x) >> (64 - (k))))
 #define ROL64(x,k) (((x) << (k)) | ((x) >> (64 - (k))))
-uint64_t random()
+uint64_t random() noexcept
 {
 {
 	// https://en.wikipedia.org/wiki/Xorshift#xoshiro256**
 	// https://en.wikipedia.org/wiki/Xorshift#xoshiro256**
 	static volatile uint64_t s_s0 = getSecureRandomU64();
 	static volatile uint64_t s_s0 = getSecureRandomU64();
@@ -434,7 +434,7 @@ uint64_t random()
 	return result;
 	return result;
 }
 }
 
 
-bool scopy(char *dest,unsigned int len,const char *src)
+bool scopy(char *dest,unsigned int len,const char *src) noexcept
 {
 {
 	if (!len)
 	if (!len)
 		return false; // sanity check
 		return false; // sanity check
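
Note: secureEq() compares in constant time by OR-ing together the XOR of every byte pair, so timing does not reveal where two secrets first differ. A standalone sketch of the same technique (the function name is illustrative):

#include <cstdint>
#include <cstdio>

// Constant-time equality in the style of secureEq() above: every byte is
// examined regardless of where (or whether) a mismatch occurs.
static bool constantTimeEq(const void *a,const void *b,unsigned int len)
{
	const uint8_t *x = reinterpret_cast<const uint8_t *>(a);
	const uint8_t *y = reinterpret_cast<const uint8_t *>(b);
	uint8_t diff = 0;
	for (unsigned int i=0;i<len;++i)
		diff |= x[i] ^ y[i];
	return diff == 0;
}

int main()
{
	const uint8_t k1[4] = {1,2,3,4},k2[4] = {1,2,3,5};
	std::printf("%d %d\n",(int)constantTimeEq(k1,k1,4),(int)constantTimeEq(k1,k2,4));
	return 0;
}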

+ 61 - 48
node/Utils.hpp

@@ -64,7 +64,7 @@ extern const char HEXCHARS[16];
  * @param len Length of strings
  * @param len Length of strings
  * @return True if strings are equal
  * @return True if strings are equal
  */
  */
-bool secureEq(const void *a,const void *b,unsigned int len);
+bool secureEq(const void *a,const void *b,unsigned int len) noexcept;
 
 
 /**
 /**
  * Be absolutely sure to zero memory
  * Be absolutely sure to zero memory
@@ -81,7 +81,7 @@ void burn(void *ptr,unsigned int len);
  * @param s Buffer, at least 24 bytes in size
  * @param s Buffer, at least 24 bytes in size
  * @return String containing 'n' in base 10 form
  * @return String containing 'n' in base 10 form
  */
  */
-char *decimal(unsigned long n,char s[24]);
+char *decimal(unsigned long n,char s[24]) noexcept;
 
 
 /**
 /**
  * Convert an unsigned integer into hex
  * Convert an unsigned integer into hex
@@ -90,10 +90,10 @@ char *decimal(unsigned long n,char s[24]);
  * @param s Buffer to receive hex, must be at least (2*sizeof(i))+1 in size or overflow will occur.
  * @param s Buffer to receive hex, must be at least (2*sizeof(i))+1 in size or overflow will occur.
  * @return Pointer to s containing hex string with trailing zero byte
  * @return Pointer to s containing hex string with trailing zero byte
  */
  */
-char *hex(uint8_t i,char s[3]);
-char *hex(uint16_t i,char s[5]);
-char *hex(uint32_t i,char s[9]);
-char *hex(uint64_t i,char s[17]);
+char *hex(uint8_t i,char s[3]) noexcept;
+char *hex(uint16_t i,char s[5]) noexcept;
+char *hex(uint32_t i,char s[9]) noexcept;
+char *hex(uint64_t i,char s[17]) noexcept;
 
 
 /**
 /**
  * Decode an unsigned integer in hex format
  * Decode an unsigned integer in hex format
@@ -101,7 +101,7 @@ char *hex(uint64_t i,char s[17]);
  * @param s String to decode, non-hex chars are ignored
  * @param s String to decode, non-hex chars are ignored
  * @return Unsigned integer
  * @return Unsigned integer
  */
  */
-uint64_t unhex(const char *s);
+uint64_t unhex(const char *s) noexcept;
 
 
 /**
 /**
  * Convert the least significant 40 bits of a uint64_t to hex
  * Convert the least significant 40 bits of a uint64_t to hex
@@ -110,7 +110,7 @@ uint64_t unhex(const char *s);
  * @param s Buffer of size [11] to receive 10 hex characters
  * @param s Buffer of size [11] to receive 10 hex characters
  * @return Pointer to buffer
  * @return Pointer to buffer
  */
  */
-char *hex10(uint64_t i,char s[11]);
+char *hex10(uint64_t i,char s[11]) noexcept;
 
 
 /**
 /**
  * Convert a byte array into hex
  * Convert a byte array into hex
@@ -120,7 +120,7 @@ char *hex10(uint64_t i,char s[11]);
  * @param s String buffer, must be at least (l*2)+1 in size or overflow will occur
  * @param s String buffer, must be at least (l*2)+1 in size or overflow will occur
  * @return Pointer to filled string buffer
  * @return Pointer to filled string buffer
  */
  */
-char *hex(const void *d,unsigned int l,char *s);
+char *hex(const void *d,unsigned int l,char *s) noexcept;
 
 
 /**
 /**
  * Decode a hex string
  * Decode a hex string
@@ -131,7 +131,7 @@ char *hex(const void *d,unsigned int l,char *s);
  * @param buflen Length of output buffer
  * @param buflen Length of output buffer
  * @return Number of written bytes
  * @return Number of written bytes
  */
  */
-unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen);
+unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen) noexcept;
 
 
 /**
 /**
  * Generate secure random bytes
  * Generate secure random bytes
@@ -142,12 +142,12 @@ unsigned int unhex(const char *h,unsigned int hlen,void *buf,unsigned int buflen
  * @param buf Buffer to fill
  * @param buf Buffer to fill
  * @param bytes Number of random bytes to generate
  * @param bytes Number of random bytes to generate
  */
  */
-void getSecureRandom(void *buf,unsigned int bytes);
+void getSecureRandom(void *buf,unsigned int bytes) noexcept;
 
 
 /**
 /**
  * @return Secure random 64-bit integer
  * @return Secure random 64-bit integer
  */
  */
-uint64_t getSecureRandomU64();
+uint64_t getSecureRandomU64() noexcept;
 
 
 /**
 /**
  * Encode string to base32
  * Encode string to base32
@@ -158,7 +158,7 @@ uint64_t getSecureRandomU64();
  * @param bufSize Size of result buffer
  * @param bufSize Size of result buffer
  * @return Number of bytes written
  * @return Number of bytes written
  */
  */
-int b32e(const uint8_t *data,int length,char *result,int bufSize);
+int b32e(const uint8_t *data,int length,char *result,int bufSize) noexcept;
 
 
 /**
 /**
  * Decode base32 string
  * Decode base32 string
@@ -168,12 +168,12 @@ int b32e(const uint8_t *data,int length,char *result,int bufSize);
  * @param bufSize Size of result buffer
  * @param bufSize Size of result buffer
  * @return Number of bytes written or -1 on error
  * @return Number of bytes written or -1 on error
  */
  */
-int b32d(const char *encoded, uint8_t *result, int bufSize);
+int b32d(const char *encoded, uint8_t *result, int bufSize) noexcept;
 
 
 /**
 /**
  * Get a non-cryptographic random integer
  * Get a non-cryptographic random integer
  */
  */
-uint64_t random();
+uint64_t random() noexcept;
 
 
 /**
 /**
  * Perform a safe C string copy, ALWAYS null-terminating the result
  * Perform a safe C string copy, ALWAYS null-terminating the result
@@ -186,7 +186,7 @@ uint64_t random();
  * @param src Source string (if NULL, dest will receive a zero-length string and true is returned)
  * @param src Source string (if NULL, dest will receive a zero-length string and true is returned)
  * @return True on success, false on overflow (buffer will still be 0-terminated)
  * @return True on success, false on overflow (buffer will still be 0-terminated)
  */
  */
-bool scopy(char *dest,unsigned int len,const char *src);
+bool scopy(char *dest,unsigned int len,const char *src) noexcept;
 
 
 /**
 /**
  * Mix bits in a 64-bit integer
  * Mix bits in a 64-bit integer
@@ -196,7 +196,7 @@ bool scopy(char *dest,unsigned int len,const char *src);
  * @param x Integer to mix
  * @param x Integer to mix
  * @return Hashed value
  * @return Hashed value
  */
  */
-static ZT_ALWAYS_INLINE uint64_t hash64(uint64_t x)
+static ZT_ALWAYS_INLINE uint64_t hash64(uint64_t x) noexcept
 {
 {
 	x ^= x >> 30U;
 	x ^= x >> 30U;
 	x *= 0xbf58476d1ce4e5b9ULL;
 	x *= 0xbf58476d1ce4e5b9ULL;
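The hunk above only shows the first two steps of hash64(). Those two steps match the well-known splitmix64 finalizer, so the sketch below uses that finalizer to show why such a mixer matters: sequential keys spread across hash-table buckets instead of clustering. The constants after the first two lines are the standard splitmix64 ones and are an assumption for illustration, not copied from this commit.

#include <cstdint>
#include <cstdio>

// Standard splitmix64 finalizer (illustrative; not necessarily identical to Utils::hash64()).
static uint64_t mix64(uint64_t x)
{
	x ^= x >> 30;
	x *= 0xbf58476d1ce4e5b9ULL;
	x ^= x >> 27;
	x *= 0x94d049bb133111ebULL;
	x ^= x >> 31;
	return x;
}

int main()
{
	// Sequential keys land in well-spread buckets once mixed.
	const unsigned long buckets = 64;
	for (uint64_t key = 0; key < 8; ++key)
		std::printf("key %llu -> bucket %lu\n", (unsigned long long)key, (unsigned long)(mix64(key) % buckets));
	return 0;
}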
@@ -211,7 +211,7 @@ static ZT_ALWAYS_INLINE uint64_t hash64(uint64_t x)
  * @param l Length of buffer
  * @param l Length of buffer
  * @return True if buffer is all zero
  * @return True if buffer is all zero
  */
  */
-static ZT_ALWAYS_INLINE bool allZero(const void *const b,const unsigned int l)
+static ZT_ALWAYS_INLINE bool allZero(const void *const b,const unsigned int l) noexcept
 {
 {
 	const uint8_t *x = reinterpret_cast<const uint8_t *>(b);
 	const uint8_t *x = reinterpret_cast<const uint8_t *>(b);
 	const uint8_t *const y = x + l;
 	const uint8_t *const y = x + l;
@@ -231,7 +231,7 @@ static ZT_ALWAYS_INLINE bool allZero(const void *const b,const unsigned int l)
  * @param saveptr Pointer to pointer where function can save state
  * @param saveptr Pointer to pointer where function can save state
  * @return Next token or NULL if none
  * @return Next token or NULL if none
  */
  */
-static ZT_ALWAYS_INLINE char *stok(char *str,const char *delim,char **saveptr)
+static ZT_ALWAYS_INLINE char *stok(char *str,const char *delim,char **saveptr) noexcept
 {
 {
 #ifdef __WINDOWS__
 #ifdef __WINDOWS__
 	return strtok_s(str,delim,saveptr);
 	return strtok_s(str,delim,saveptr);
@@ -240,9 +240,9 @@ static ZT_ALWAYS_INLINE char *stok(char *str,const char *delim,char **saveptr)
 #endif
 #endif
 }
 }
 
 
-static ZT_ALWAYS_INLINE unsigned int strToUInt(const char *s) { return (unsigned int)strtoul(s,nullptr,10); }
+static ZT_ALWAYS_INLINE unsigned int strToUInt(const char *s) noexcept { return (unsigned int)strtoul(s,nullptr,10); }
 
 
-static ZT_ALWAYS_INLINE unsigned long long strToU64(const char *s)
+static ZT_ALWAYS_INLINE unsigned long long strToU64(const char *s) noexcept
 {
 {
 #ifdef __WINDOWS__
 #ifdef __WINDOWS__
 	return (unsigned long long)_strtoui64(s,(char **)0,10);
 	return (unsigned long long)_strtoui64(s,(char **)0,10);
@@ -251,7 +251,7 @@ static ZT_ALWAYS_INLINE unsigned long long strToU64(const char *s)
 #endif
 #endif
 }
 }
 
 
-static ZT_ALWAYS_INLINE unsigned long long hexStrToU64(const char *s)
+static ZT_ALWAYS_INLINE unsigned long long hexStrToU64(const char *s) noexcept
 {
 {
 #ifdef __WINDOWS__
 #ifdef __WINDOWS__
 	return (unsigned long long)_strtoui64(s,nullptr,16);
 	return (unsigned long long)_strtoui64(s,nullptr,16);
@@ -267,7 +267,7 @@ static ZT_ALWAYS_INLINE unsigned long long hexStrToU64(const char *s)
  * @param len Length in bytes
  * @param len Length in bytes
  * @return Non-cryptographic hash suitable for use in a hash table
  * @return Non-cryptographic hash suitable for use in a hash table
  */
  */
-static ZT_ALWAYS_INLINE unsigned long hashString(const void *restrict key,const unsigned int len)
+static ZT_ALWAYS_INLINE unsigned long hashString(const void *restrict key,const unsigned int len) noexcept
 {
 {
 	const uint8_t *p = reinterpret_cast<const uint8_t *>(key);
 	const uint8_t *p = reinterpret_cast<const uint8_t *>(key);
 	unsigned long h = 0;
 	unsigned long h = 0;
@@ -283,13 +283,13 @@ static ZT_ALWAYS_INLINE unsigned long hashString(const void *restrict key,const
 }
 }
 
 
 #ifdef __GNUC__
 #ifdef __GNUC__
-static ZT_ALWAYS_INLINE unsigned int countBits(const uint8_t v) { return (unsigned int)__builtin_popcount((unsigned int)v); }
-static ZT_ALWAYS_INLINE unsigned int countBits(const uint16_t v) { return (unsigned int)__builtin_popcount((unsigned int)v); }
-static ZT_ALWAYS_INLINE unsigned int countBits(const uint32_t v) { return (unsigned int)__builtin_popcountl((unsigned long)v); }
-static ZT_ALWAYS_INLINE unsigned int countBits(const uint64_t v) { return (unsigned int)__builtin_popcountll((unsigned long long)v); }
+static ZT_ALWAYS_INLINE unsigned int countBits(const uint8_t v) noexcept { return (unsigned int)__builtin_popcount((unsigned int)v); }
+static ZT_ALWAYS_INLINE unsigned int countBits(const uint16_t v) noexcept { return (unsigned int)__builtin_popcount((unsigned int)v); }
+static ZT_ALWAYS_INLINE unsigned int countBits(const uint32_t v) noexcept { return (unsigned int)__builtin_popcountl((unsigned long)v); }
+static ZT_ALWAYS_INLINE unsigned int countBits(const uint64_t v) noexcept { return (unsigned int)__builtin_popcountll((unsigned long long)v); }
 #else
 #else
 template<typename T>
 template<typename T>
-static ZT_ALWAYS_INLINE unsigned int countBits(T v)
+static ZT_ALWAYS_INLINE unsigned int countBits(T v) noexcept
 {
 {
 	v = v - ((v >> 1) & (T)~(T)0/3);
 	v = v - ((v >> 1) & (T)~(T)0/3);
 	v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);
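The generic countBits() fallback is cut off by the hunk above. Below is a self-contained version of the classic SWAR population count it appears to follow, checked against std::bitset; the last two steps are the textbook continuation of the algorithm and are an assumption here, not taken from this commit.

#include <cstdint>
#include <climits>
#include <bitset>
#include <cstdio>

// Classic SWAR population count for unsigned integer types.
template<typename T>
static unsigned int popcountSWAR(T v)
{
	v = v - ((v >> 1) & (T)~(T)0/3);
	v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);
	v = (v + (v >> 4)) & (T)~(T)0/255*15;
	return (unsigned int)((T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * CHAR_BIT);
}

int main()
{
	const uint64_t samples[] = { 0ULL, 1ULL, 0xffULL, 0xdeadbeefcafef00dULL, ~0ULL };
	for (uint64_t s : samples)
		std::printf("%016llx -> %u (bitset says %zu)\n",
			(unsigned long long)s, popcountSWAR(s), std::bitset<64>(s).count());
	return 0;
}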
 	v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);
@@ -299,9 +299,9 @@ static ZT_ALWAYS_INLINE unsigned int countBits(T v)
 #endif
 #endif
 
 
 #if __BYTE_ORDER == __LITTLE_ENDIAN
 #if __BYTE_ORDER == __LITTLE_ENDIAN
-static ZT_ALWAYS_INLINE uint8_t hton(uint8_t n) { return n; }
-static ZT_ALWAYS_INLINE int8_t hton(int8_t n) { return n; }
-static ZT_ALWAYS_INLINE uint16_t hton(uint16_t n)
+static ZT_ALWAYS_INLINE uint8_t hton(uint8_t n) noexcept { return n; }
+static ZT_ALWAYS_INLINE int8_t hton(int8_t n) noexcept { return n; }
+static ZT_ALWAYS_INLINE uint16_t hton(uint16_t n) noexcept
 {
 {
 #if defined(__GNUC__)
 #if defined(__GNUC__)
 #if defined(__FreeBSD__)
 #if defined(__FreeBSD__)
@@ -313,8 +313,8 @@ static ZT_ALWAYS_INLINE uint16_t hton(uint16_t n)
 	return htons(n);
 	return htons(n);
 #endif
 #endif
 }
 }
-static ZT_ALWAYS_INLINE int16_t hton(int16_t n) { return (int16_t)Utils::hton((uint16_t)n); }
-static ZT_ALWAYS_INLINE uint32_t hton(uint32_t n)
+static ZT_ALWAYS_INLINE int16_t hton(int16_t n) noexcept { return (int16_t)Utils::hton((uint16_t)n); }
+static ZT_ALWAYS_INLINE uint32_t hton(uint32_t n) noexcept
 {
 {
 #if defined(__GNUC__)
 #if defined(__GNUC__)
 #if defined(__FreeBSD__)
 #if defined(__FreeBSD__)
@@ -326,8 +326,8 @@ static ZT_ALWAYS_INLINE uint32_t hton(uint32_t n)
 	return htonl(n);
 	return htonl(n);
 #endif
 #endif
 }
 }
-static ZT_ALWAYS_INLINE int32_t hton(int32_t n) { return (int32_t)Utils::hton((uint32_t)n); }
-static ZT_ALWAYS_INLINE uint64_t hton(uint64_t n)
+static ZT_ALWAYS_INLINE int32_t hton(int32_t n) noexcept { return (int32_t)Utils::hton((uint32_t)n); }
+static ZT_ALWAYS_INLINE uint64_t hton(uint64_t n) noexcept
 {
 {
 #if defined(__GNUC__)
 #if defined(__GNUC__)
 #if defined(__FreeBSD__)
 #if defined(__FreeBSD__)
@@ -348,16 +348,16 @@ static ZT_ALWAYS_INLINE uint64_t hton(uint64_t n)
 	);
 	);
 #endif
 #endif
 }
 }
-static ZT_ALWAYS_INLINE int64_t hton(int64_t n) { return (int64_t)hton((uint64_t)n); }
+static ZT_ALWAYS_INLINE int64_t hton(int64_t n) noexcept { return (int64_t)hton((uint64_t)n); }
 #else
 #else
 template<typename T>
 template<typename T>
-static ZT_ALWAYS_INLINE T hton(T n) { return n; }
+static ZT_ALWAYS_INLINE T hton(T n) noexcept { return n; }
 #endif
 #endif
 
 
 #if __BYTE_ORDER == __LITTLE_ENDIAN
 #if __BYTE_ORDER == __LITTLE_ENDIAN
-static ZT_ALWAYS_INLINE uint8_t ntoh(uint8_t n) { return n; }
-static ZT_ALWAYS_INLINE int8_t ntoh(int8_t n) { return n; }
-static ZT_ALWAYS_INLINE uint16_t ntoh(uint16_t n)
+static ZT_ALWAYS_INLINE uint8_t ntoh(uint8_t n) noexcept { return n; }
+static ZT_ALWAYS_INLINE int8_t ntoh(int8_t n) noexcept { return n; }
+static ZT_ALWAYS_INLINE uint16_t ntoh(uint16_t n) noexcept
 {
 {
 #if defined(__GNUC__)
 #if defined(__GNUC__)
 #if defined(__FreeBSD__)
 #if defined(__FreeBSD__)
@@ -369,8 +369,8 @@ static ZT_ALWAYS_INLINE uint16_t ntoh(uint16_t n)
 	return htons(n);
 	return htons(n);
 #endif
 #endif
 }
 }
-static ZT_ALWAYS_INLINE int16_t ntoh(int16_t n) { return (int16_t)Utils::ntoh((uint16_t)n); }
-static ZT_ALWAYS_INLINE uint32_t ntoh(uint32_t n)
+static ZT_ALWAYS_INLINE int16_t ntoh(int16_t n) noexcept { return (int16_t)Utils::ntoh((uint16_t)n); }
+static ZT_ALWAYS_INLINE uint32_t ntoh(uint32_t n) noexcept
 {
 {
 #if defined(__GNUC__)
 #if defined(__GNUC__)
 #if defined(__FreeBSD__)
 #if defined(__FreeBSD__)
@@ -382,8 +382,8 @@ static ZT_ALWAYS_INLINE uint32_t ntoh(uint32_t n)
 	return ntohl(n);
 	return ntohl(n);
 #endif
 #endif
 }
 }
-static ZT_ALWAYS_INLINE int32_t ntoh(int32_t n) { return (int32_t)Utils::ntoh((uint32_t)n); }
-static ZT_ALWAYS_INLINE uint64_t ntoh(uint64_t n)
+static ZT_ALWAYS_INLINE int32_t ntoh(int32_t n) noexcept { return (int32_t)Utils::ntoh((uint32_t)n); }
+static ZT_ALWAYS_INLINE uint64_t ntoh(uint64_t n) noexcept
 {
 {
 #if defined(__GNUC__)
 #if defined(__GNUC__)
 #if defined(__FreeBSD__)
 #if defined(__FreeBSD__)
@@ -404,14 +404,14 @@ static ZT_ALWAYS_INLINE uint64_t ntoh(uint64_t n)
 	);
 	);
 #endif
 #endif
 }
 }
-static ZT_ALWAYS_INLINE int64_t ntoh(int64_t n) { return (int64_t)ntoh((uint64_t)n); }
+static ZT_ALWAYS_INLINE int64_t ntoh(int64_t n) noexcept { return (int64_t)ntoh((uint64_t)n); }
 #else
 #else
 template<typename T>
 template<typename T>
-static ZT_ALWAYS_INLINE T ntoh(T n) { return n; }
+static ZT_ALWAYS_INLINE T ntoh(T n) noexcept { return n; }
 #endif
 #endif
 
 
 template<typename I>
 template<typename I>
-static ZT_ALWAYS_INLINE I loadBigEndian(const void *const p)
+static ZT_ALWAYS_INLINE I loadBigEndian(const void *const p) noexcept
 {
 {
 #ifdef ZT_NO_UNALIGNED_ACCESS
 #ifdef ZT_NO_UNALIGNED_ACCESS
 	I x = (I)0;
 	I x = (I)0;
@@ -429,7 +429,20 @@ static ZT_ALWAYS_INLINE I loadBigEndian(const void *const p)
 }
 }
 
 
 template<typename I>
 template<typename I>
-static ZT_ALWAYS_INLINE void storeBigEndian(void *const p,const I i)
+static ZT_ALWAYS_INLINE I loadAsIsEndian(const void *const p) noexcept
+{
+#ifdef ZT_NO_UNALIGNED_ACCESS
+	I x = (I)0;
+	for(unsigned int k=0;k<sizeof(I);++k)
+		reinterpret_cast<uint8_t *>(&x)[k] = reinterpret_cast<const uint8_t *>(p)[k];
+	return x;
+#else
+	return *reinterpret_cast<const I *>(p);
+#endif
+}
+
+template<typename I>
+static ZT_ALWAYS_INLINE void storeBigEndian(void *const p,const I i) noexcept
 {
 {
 #ifdef ZT_NO_UNALIGNED_ACCESS
 #ifdef ZT_NO_UNALIGNED_ACCESS
 	for(unsigned int k=0;k<sizeof(I);++k) {
 	for(unsigned int k=0;k<sizeof(I);++k) {
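To summarize the Utils byte-order helpers touched above (loadBigEndian/storeBigEndian and the new loadAsIsEndian) before the VL1.cpp changes: big-endian accessors give a fixed wire order regardless of host endianness, while an "as-is" load just reinterprets the bytes in host order. The sketch below shows the same idea with portable shift/memcpy accessors; it is an illustration, not the ZeroTier implementation.

#include <cstdint>
#include <cstring>
#include <cstdio>

// Portable big-endian store/load using byte shifts (independent of host byte order).
static void storeBE32(void *p, uint32_t v)
{
	uint8_t *b = reinterpret_cast<uint8_t *>(p);
	b[0] = (uint8_t)(v >> 24); b[1] = (uint8_t)(v >> 16);
	b[2] = (uint8_t)(v >> 8);  b[3] = (uint8_t)v;
}
static uint32_t loadBE32(const void *p)
{
	const uint8_t *b = reinterpret_cast<const uint8_t *>(p);
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) | ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}
// "As-is" load: bytes interpreted in whatever order the host uses (alignment-safe via memcpy).
static uint32_t loadAsIs32(const void *p)
{
	uint32_t v;
	std::memcpy(&v, p, sizeof(v));
	return v;
}

int main()
{
	uint8_t wire[4];
	storeBE32(wire, 0x11223344u);
	std::printf("big-endian load: %08x\n", loadBE32(wire));   // always 11223344
	std::printf("host-order load: %08x\n", loadAsIs32(wire)); // 44332211 on little-endian hosts
	return 0;
}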

+ 383 - 95
node/VL1.cpp

@@ -29,7 +29,7 @@ namespace ZeroTier {
 
 
 namespace {
 namespace {
 
 
-ZT_ALWAYS_INLINE const Identity &ifPeerNonNull(const SharedPtr<Peer> &p)
+ZT_ALWAYS_INLINE const Identity &identityFromPeerPtr(const SharedPtr<Peer> &p)
 {
 {
 	if (p)
 	if (p)
 		return p->identity();
 		return p->identity();
@@ -49,15 +49,36 @@ VL1::~VL1()
 
 
 void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAddress &fromAddr,SharedPtr<Buf> &data,const unsigned int len)
 void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAddress &fromAddr,SharedPtr<Buf> &data,const unsigned int len)
 {
 {
+	// Get canonical Path object for this originating address and local socket pair.
+	const SharedPtr<Path> path(RR->topology->path(localSocket,fromAddr));
+
 	const int64_t now = RR->node->now();
 	const int64_t now = RR->node->now();
-	const SharedPtr<Path> path(RR->topology->getPath(localSocket,fromAddr));
-	path->received(now);
 
 
-	// Really short packets are keepalives and other junk.
-	if (len < ZT_PROTO_MIN_FRAGMENT_LENGTH)
-		return;
+	// Update path's last receive time (this is updated when anything is received at all, even if invalid or a keepalive)
+	path->received(now);
 
 
 	try {
 	try {
+		// Handle 8-byte short probes, which are used as a low-bandwidth way to initiate a real handshake.
+		// These are only minimally "secure" in the sense that they are unique per graph edge (sender->recipient)
+		// to within 1/2^64 but can easily be replayed. We rate limit them to prevent ZeroTier from being used
+		// as a vector in DDoS amplification attacks, and we respond with a larger, fully authenticated message to
+		// initiate a handshake. We do not send HELLO since we don't want this to be a vector for third parties to
+		// mass-probe for ZeroTier nodes and obtain all of the information in a HELLO. This isn't a huge risk
+		// but we might as well avoid it. When the peer receives NOP on a path that hasn't been handshaked yet
+		// it will send its own HELLO to which we will respond with a fully encrypted OK(HELLO).
+		if (len == ZT_PROTO_PROBE_LENGTH) {
+			const SharedPtr<Peer> peer(RR->topology->peerByProbe(Utils::loadAsIsEndian<uint64_t>(data->b)));
+			if ((peer)&&(peer->rateGateInboundProbe(now))) {
+				peer->sendNOP(tPtr,path->localSocket(),path->address(),now);
+				path->sent(now);
+			}
+			return;
+		}
+
+		// Discard any other runt packets that aren't probes. These are likely to be keepalives or corrupt junk.
+		if (len < ZT_PROTO_MIN_FRAGMENT_LENGTH)
+			return;
+
 		FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS> pktv;
 		FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS> pktv;
 		Address destination;
 		Address destination;
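The probe handling added above leans on Peer::rateGateInboundProbe() so that 8-byte probes cannot be turned into an amplification vector. A minimal sketch of that kind of gate, a per-peer minimum interval between accepted probes, is shown below; the ProbeGate name, the interval constant, and the millisecond clock are illustrative assumptions, not ZeroTier's actual implementation.

#include <cstdint>
#include <cstdio>

// Illustrative per-peer probe rate gate: accept at most one probe per interval.
class ProbeGate
{
public:
	explicit ProbeGate(int64_t intervalMs = 1000) : _intervalMs(intervalMs), _lastAccepted(-1) {}

	// Returns true if a probe arriving at 'now' (milliseconds) should be acted upon.
	bool allow(int64_t now)
	{
		if ((_lastAccepted < 0) || ((now - _lastAccepted) >= _intervalMs)) {
			_lastAccepted = now;
			return true;
		}
		return false;
	}

private:
	int64_t _intervalMs;
	int64_t _lastAccepted;
};

int main()
{
	ProbeGate gate(1000);
	std::printf("%d %d %d\n", (int)gate.allow(0), (int)gate.allow(500), (int)gate.allow(1500)); // prints "1 0 1"
	return 0;
}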
 
 
@@ -103,7 +124,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 			destination.setTo(ph.destination);
 			destination.setTo(ph.destination);
 
 
 			if (destination != RR->identity.address()) {
 			if (destination != RR->identity.address()) {
-				// Packet or packet head is not address to this node ----------------------------------------------------------
+				// Packet or packet head is not addressed to this node --------------------------------------------------------
 				_relay(tPtr,path,destination,data,len);
 				_relay(tPtr,path,destination,data,len);
 				return;
 				return;
 			}
 			}
@@ -146,7 +167,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 		// there is enough room in each slice to shift their contents to sizes that are multiples
 		// there is enough room in each slice to shift their contents to sizes that are multiples
 		// of 64 if needed for crypto.
 		// of 64 if needed for crypto.
 		if ((pktv.empty()) || (((int)pktv[0].e - (int)pktv[0].s) < sizeof(Protocol::Header))) {
 		if ((pktv.empty()) || (((int)pktv[0].e - (int)pktv[0].s) < sizeof(Protocol::Header))) {
-			RR->t->unexpectedError(tPtr,0x3df19990,"empty or undersized packet vector");
+			RR->t->unexpectedError(tPtr,0x3df19990,"empty or undersized packet vector after parsing packet from %s of length %d",Trace::str(path->address()).s,(int)len);
 			return;
 			return;
 		}
 		}
 		for(FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS>::const_iterator s(pktv.begin());s!=pktv.end();++s) {
 		for(FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS>::const_iterator s(pktv.begin());s!=pktv.end();++s) {
@@ -159,7 +180,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 
 
 		if (source == RR->identity.address())
 		if (source == RR->identity.address())
 			return;
 			return;
-		SharedPtr<Peer> peer(RR->topology->get(tPtr,source));
+		SharedPtr<Peer> peer(RR->topology->peer(tPtr,source));
 
 
 		Buf::Slice pkt;
 		Buf::Slice pkt;
 		bool authenticated = false;
 		bool authenticated = false;
@@ -171,7 +192,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 		for(FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS>::const_iterator s(pktv.begin()+1);s!=pktv.end();++s)
 		for(FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS>::const_iterator s(pktv.begin()+1);s!=pktv.end();++s)
 			packetSize += s->e - s->s;
 			packetSize += s->e - s->s;
 		if (packetSize > ZT_PROTO_MAX_PACKET_LENGTH) {
 		if (packetSize > ZT_PROTO_MAX_PACKET_LENGTH) {
-			RR->t->incomingPacketDropped(tPtr,0x010348da,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+			RR->t->incomingPacketDropped(tPtr,0x010348da,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
 			return;
 			return;
 		}
 		}
 
 
@@ -179,7 +200,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 		if ((!peer)&&(!(((cipher == ZT_PROTO_CIPHER_SUITE__POLY1305_NONE)||(cipher == ZT_PROTO_CIPHER_SUITE__NONE))&&((ph->verb & 0x1fU) == Protocol::VERB_HELLO)))) {
 		if ((!peer)&&(!(((cipher == ZT_PROTO_CIPHER_SUITE__POLY1305_NONE)||(cipher == ZT_PROTO_CIPHER_SUITE__NONE))&&((ph->verb & 0x1fU) == Protocol::VERB_HELLO)))) {
 			pkt = Buf::assembleSliceVector(pktv);
 			pkt = Buf::assembleSliceVector(pktv);
 			if (pkt.e < ZT_PROTO_MIN_PACKET_LENGTH) {
 			if (pkt.e < ZT_PROTO_MIN_PACKET_LENGTH) {
-				RR->t->incomingPacketDropped(tPtr,0xbada9366,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+				RR->t->incomingPacketDropped(tPtr,0xbada9366,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
 				return;
 				return;
 			}
 			}
 			{
 			{
@@ -274,7 +295,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 					}
 					}
 					authenticated = true;
 					authenticated = true;
 				} else {
 				} else {
-					RR->t->incomingPacketDropped(tPtr,0xb0b01999,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
+					RR->t->incomingPacketDropped(tPtr,0xb0b01999,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 					return;
 					return;
 				}
 				}
 				break;
 				break;
@@ -286,13 +307,13 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 
 
 				pkt = Buf::assembleSliceVector(pktv);
 				pkt = Buf::assembleSliceVector(pktv);
 				if (pkt.e < ZT_PROTO_MIN_PACKET_LENGTH)
 				if (pkt.e < ZT_PROTO_MIN_PACKET_LENGTH)
-					RR->t->incomingPacketDropped(tPtr,0x3d3337df,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+					RR->t->incomingPacketDropped(tPtr,0x3d3337df,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
 				ph = &(pkt.b->as<Protocol::Header>());
 				ph = &(pkt.b->as<Protocol::Header>());
 
 
 				if (RR->topology->shouldInboundPathBeTrusted(path->address(),Utils::ntoh(ph->mac))) {
 				if (RR->topology->shouldInboundPathBeTrusted(path->address(),Utils::ntoh(ph->mac))) {
 					authenticated = true;
 					authenticated = true;
 				} else {
 				} else {
-					RR->t->incomingPacketDropped(tPtr,0x2dfa910b,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_NOT_TRUSTED_PATH);
+					RR->t->incomingPacketDropped(tPtr,0x2dfa910b,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_NOT_TRUSTED_PATH);
 					return;
 					return;
 				}
 				}
 			} break;
 			} break;
@@ -303,21 +324,27 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 			//	break;
 			//	break;
 
 
 			default:
 			default:
-				RR->t->incomingPacketDropped(tPtr,0x5b001099,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
+				RR->t->incomingPacketDropped(tPtr,0x5b001099,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
 				return;
 				return;
 		}
 		}
 
 
-		// Packet fully assembled and may be authenticated ----------------------------------------------------------------
+		// Packet fully assembled, authenticated 'true' if already authenticated via MAC ----------------------------------
 
 
 		// Return any still held buffers in pktv to the buffer pool.
 		// Return any still held buffers in pktv to the buffer pool.
 		pktv.clear();
 		pktv.clear();
 
 
 		const Protocol::Verb verb = (Protocol::Verb)(ph->verb & ZT_PROTO_VERB_MASK);
 		const Protocol::Verb verb = (Protocol::Verb)(ph->verb & ZT_PROTO_VERB_MASK);
 
 
+		// Note that all verbs except HELLO require MAC.
+		if (((!authenticated)||(!peer))&&(verb != Protocol::VERB_HELLO)) {
+			RR->t->incomingPacketDropped(tPtr,0x5b001099,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
+			return;
+		}
+
 		// Decompress packet payload if compressed.
 		// Decompress packet payload if compressed.
 		if ((ph->verb & ZT_PROTO_VERB_FLAG_COMPRESSED) != 0) {
 		if ((ph->verb & ZT_PROTO_VERB_FLAG_COMPRESSED) != 0) {
 			if (!authenticated) {
 			if (!authenticated) {
-				RR->t->incomingPacketDropped(tPtr,0x390bcd0a,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+				RR->t->incomingPacketDropped(tPtr,0x390bcd0a,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
 				return;
 				return;
 			}
 			}
 
 
@@ -337,49 +364,71 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 				pkt.b.swap(nb);
 				pkt.b.swap(nb);
 				pkt.e = packetSize = (unsigned int)uncompressedLen;
 				pkt.e = packetSize = (unsigned int)uncompressedLen;
 			} else {
 			} else {
-				RR->t->incomingPacketDropped(tPtr,0xee9e4392,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_INVALID_COMPRESSED_DATA);
+				RR->t->incomingPacketDropped(tPtr,0xee9e4392,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_INVALID_COMPRESSED_DATA);
 				return;
 				return;
 			}
 			}
 		}
 		}
 
 
-		// VL1 and VL2 are conceptually and (mostly) logically separate layers.
-		// Verbs that relate to VL1 (P2P transport) are handled in this class.
-		// VL2 (virtual Ethernet / SDN) verbs are handled in the VL2 class.
+		/*
+		 * Important notes:
+		 *
+		 * All verbs except HELLO assume that authenticated is true and peer is non-NULL.
+		 * This is checked above. HELLO will accept either case and always performs its
+		 * own secondary validation. The path argument is never NULL.
+		 *
+		 * VL1 and VL2 are conceptually separate layers of the ZeroTier protocol. In the
+		 * code they are almost entirely logically separate. To make the code easier to
+		 * understand the handlers for VL2 data paths have been moved to a VL2 class.
+		 */
+
+		bool ok = true;
 		switch(verb) {
 		switch(verb) {
-			case Protocol::VERB_NOP:
-				peer->received(tPtr,path,hops,ph->packetId,packetSize - ZT_PROTO_PACKET_PAYLOAD_START,Protocol::VERB_NOP,0,Protocol::VERB_NOP,0);
-				break;
-
-			case Protocol::VERB_HELLO:                      _HELLO(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_ERROR:                      _ERROR(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_OK:                         _OK(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_WHOIS:                      _WHOIS(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_RENDEZVOUS:                 _RENDEZVOUS(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_FRAME:                      RR->vl2->_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_EXT_FRAME:                  RR->vl2->_EXT_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_ECHO:                       _ECHO(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated);
-			case Protocol::VERB_MULTICAST_LIKE:             RR->vl2->_MULTICAST_LIKE(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_NETWORK_CREDENTIALS:        RR->vl2->_NETWORK_CREDENTIALS(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_NETWORK_CONFIG_REQUEST:     RR->vl2->_NETWORK_CONFIG_REQUEST(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_NETWORK_CONFIG:             RR->vl2->_NETWORK_CONFIG(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_MULTICAST_GATHER:           RR->vl2->_MULTICAST_GATHER(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_MULTICAST_FRAME_deprecated: RR->vl2->_MULTICAST_FRAME_deprecated(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_PUSH_DIRECT_PATHS:          _PUSH_DIRECT_PATHS(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_USER_MESSAGE:               _USER_MESSAGE(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_MULTICAST:                  RR->vl2->_MULTICAST(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-			case Protocol::VERB_ENCAP:                      _ENCAP(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
-
+			case Protocol::VERB_NOP:                        break;
+			case Protocol::VERB_HELLO:                      ok = _HELLO(tPtr,path,peer,*pkt.b,(int)packetSize,authenticated); break;
+			case Protocol::VERB_ERROR:                      ok = _ERROR(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_OK:                         ok = _OK(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_WHOIS:                      ok = _WHOIS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_RENDEZVOUS:                 ok = _RENDEZVOUS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_FRAME:                      ok = RR->vl2->_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_EXT_FRAME:                  ok = RR->vl2->_EXT_FRAME(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_ECHO:                       ok = _ECHO(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_MULTICAST_LIKE:             ok = RR->vl2->_MULTICAST_LIKE(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_NETWORK_CREDENTIALS:        ok = RR->vl2->_NETWORK_CREDENTIALS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_NETWORK_CONFIG_REQUEST:     ok = RR->vl2->_NETWORK_CONFIG_REQUEST(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_NETWORK_CONFIG:             ok = RR->vl2->_NETWORK_CONFIG(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_MULTICAST_GATHER:           ok = RR->vl2->_MULTICAST_GATHER(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_MULTICAST_FRAME_deprecated: ok = RR->vl2->_MULTICAST_FRAME_deprecated(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_PUSH_DIRECT_PATHS:          ok = _PUSH_DIRECT_PATHS(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_USER_MESSAGE:               ok = _USER_MESSAGE(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_MULTICAST:                  ok = RR->vl2->_MULTICAST(tPtr,path,peer,*pkt.b,(int)packetSize); break;
+			case Protocol::VERB_ENCAP:                      ok = _ENCAP(tPtr,path,peer,*pkt.b,(int)packetSize); break;
 			default:
 			default:
-				RR->t->incomingPacketDropped(tPtr,0xdeadeff0,ph->packetId,0,ifPeerNonNull(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_UNRECOGNIZED_VERB);
+				RR->t->incomingPacketDropped(tPtr,0xdeadeff0,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_UNRECOGNIZED_VERB);
 				break;
 				break;
 		}
 		}
+		if (ok)
+			peer->received(tPtr,path,hops,ph->packetId,packetSize - ZT_PROTO_PACKET_PAYLOAD_START,verb);
 	} catch ( ... ) {
 	} catch ( ... ) {
-		RR->t->unexpectedError(tPtr,0xea1b6dea,"unexpected exception in onRemotePacket()");
+		RR->t->unexpectedError(tPtr,0xea1b6dea,"unexpected exception in onRemotePacket() parsing packet from %s",Trace::str(path->address()).s);
 	}
 	}
 }
 }
 
 
 void VL1::_relay(void *tPtr,const SharedPtr<Path> &path,const Address &destination,SharedPtr<Buf> &data,unsigned int len)
 void VL1::_relay(void *tPtr,const SharedPtr<Path> &path,const Address &destination,SharedPtr<Buf> &data,unsigned int len)
 {
 {
+	const uint8_t newHopCount = (data->b[ZT_PROTO_PACKET_FLAGS_INDEX] & 7U) + 1;
+	if (newHopCount >= ZT_RELAY_MAX_HOPS)
+		return;
+	data->b[ZT_PROTO_PACKET_FLAGS_INDEX] = (data->b[ZT_PROTO_PACKET_FLAGS_INDEX] & 0xf8U) | newHopCount;
+
+	const SharedPtr<Peer> toPeer(RR->topology->peer(tPtr,destination,false));
+	if (!toPeer)
+		return;
+	const uint64_t now = RR->node->now();
+	const SharedPtr<Path> toPath(toPeer->path(now));
+	if (!toPath)
+		return;
+
+	toPath->send(RR,tPtr,data->b,len,now);
 }
 }
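The _relay() body above treats the low three bits of the packet's flags byte as the hop counter (hence the & 7U and & 0xf8U masks), bumping it on each relay and refusing to forward once the relay limit is reached. The standalone sketch below shows just that bit manipulation; the 3-bit layout is inferred from the masks in the diff, and the limit of 7 is an assumption for illustration only.

#include <cstdint>
#include <cstdio>

// Hop counter kept in the low 3 bits of a flags byte; returns false once the relay limit is hit.
static bool bumpHops(uint8_t &flags, unsigned int maxHops)
{
	const uint8_t hops = (uint8_t)((flags & 7U) + 1U);
	if (hops >= maxHops)
		return false;
	flags = (uint8_t)((flags & 0xf8U) | hops);
	return true;
}

int main()
{
	uint8_t flags = 0xe0; // upper bits (other flags) are preserved across relays
	while (bumpHops(flags, 7))
		std::printf("flags now %02x (hops %u)\n", flags, (unsigned int)(flags & 7U));
	return 0;
}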
 
 
 void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
 void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
@@ -417,61 +466,60 @@ void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
 		ph.flags = 0;
 		ph.flags = 0;
 		ph.verb = Protocol::VERB_OK;
 		ph.verb = Protocol::VERB_OK;
 
 
-		int ptr = sizeof(Protocol::Header);
-		while ((a != toSend.end())&&(ptr < (ZT_PROTO_MAX_PACKET_LENGTH - 1))) {
-			a->copyTo(outp.b + ptr);
+		int outl = sizeof(Protocol::Header);
+		while ((a != toSend.end())&&(outl < ZT_PROTO_MAX_PACKET_LENGTH)) {
+			a->copyTo(outp.b + outl);
 			++a;
 			++a;
-			ptr += ZT_ADDRESS_LENGTH;
+			outl += ZT_ADDRESS_LENGTH;
 		}
 		}
 
 
-		if (ptr > sizeof(Protocol::Header)) {
-			Protocol::armor(outp,ptr,root->key(),ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
-			rootPath->send(RR,tPtr,outp.b,ptr,now);
+		if (outl > sizeof(Protocol::Header)) {
+			Protocol::armor(outp,outl,root->key(),ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
+			rootPath->send(RR,tPtr,outp.b,outl,now);
 		}
 		}
 	}
 	}
 }
 }
 
 
-void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
 {
 {
 	if (packetSize < sizeof(Protocol::HELLO)) {
 	if (packetSize < sizeof(Protocol::HELLO)) {
-		RR->t->incomingPacketDropped(tPtr,0x2bdb0001,0,0,ifPeerNonNull(peer),path->address(),0,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
-		return;
+		RR->t->incomingPacketDropped(tPtr,0x2bdb0001,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+		return false;
 	}
 	}
-
 	Protocol::HELLO &p = pkt.as<Protocol::HELLO>();
 	Protocol::HELLO &p = pkt.as<Protocol::HELLO>();
 	const uint8_t hops = Protocol::packetHops(p.h);
 	const uint8_t hops = Protocol::packetHops(p.h);
 	int ptr = sizeof(Protocol::HELLO);
 	int ptr = sizeof(Protocol::HELLO);
 
 
 	if (p.versionProtocol < ZT_PROTO_VERSION_MIN) {
 	if (p.versionProtocol < ZT_PROTO_VERSION_MIN) {
-		RR->t->incomingPacketDropped(tPtr,0xe8d12bad,p.h.packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_PEER_TOO_OLD);
-		return;
+		RR->t->incomingPacketDropped(tPtr,0xe8d12bad,p.h.packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_PEER_TOO_OLD);
+		return false;
 	}
 	}
 
 
 	Identity id;
 	Identity id;
 	if (pkt.rO(ptr,id) < 0) {
 	if (pkt.rO(ptr,id) < 0) {
-		RR->t->incomingPacketDropped(tPtr,0x707a9810,p.h.packetId,0,ifPeerNonNull(peer),path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
-		return;
+		RR->t->incomingPacketDropped(tPtr,0x707a9810,p.h.packetId,0,identityFromPeerPtr(peer),path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
+		return false;
 	}
 	}
 	if (Address(p.h.source) != id.address()) {
 	if (Address(p.h.source) != id.address()) {
 		RR->t->incomingPacketDropped(tPtr,0x06aa9ff1,p.h.packetId,0,Identity::NIL,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 		RR->t->incomingPacketDropped(tPtr,0x06aa9ff1,p.h.packetId,0,Identity::NIL,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
-		return;
+		return false;
 	}
 	}
 
 
-	// Packet is basically valid and identity unmarshaled ---------------------------------------------------------------
+	// Packet is basically valid and identity unmarshaled successfully --------------------------------------------------
 
 
 	// Get long-term static key for this node.
 	// Get long-term static key for this node.
 	uint8_t key[ZT_PEER_SECRET_KEY_LENGTH];
 	uint8_t key[ZT_PEER_SECRET_KEY_LENGTH];
-	if ((peer)&&(id == peer->identity())) {
+	if ((peer) && (id == peer->identity())) {
 		memcpy(key,peer->key(),ZT_PEER_SECRET_KEY_LENGTH);
 		memcpy(key,peer->key(),ZT_PEER_SECRET_KEY_LENGTH);
 	} else {
 	} else {
 		peer.zero();
 		peer.zero();
 		if (!RR->identity.agree(id,key)) {
 		if (!RR->identity.agree(id,key)) {
 			RR->t->incomingPacketDropped(tPtr,0x46db8010,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 			RR->t->incomingPacketDropped(tPtr,0x46db8010,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
-			return;
+			return false;
 		}
 		}
 	}
 	}
 
 
-	// Verify packet using Poly1305, which for v2.x
+	// Verify packet using Poly1305 MAC
 	{
 	{
 		uint8_t perPacketKey[ZT_PEER_SECRET_KEY_LENGTH];
 		uint8_t perPacketKey[ZT_PEER_SECRET_KEY_LENGTH];
 		uint8_t macKey[ZT_POLY1305_KEY_LEN];
 		uint8_t macKey[ZT_POLY1305_KEY_LEN];
@@ -481,7 +529,7 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 		poly1305(mac,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
 		poly1305(mac,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
 		if (p.h.mac != mac[0]) {
 		if (p.h.mac != mac[0]) {
 			RR->t->incomingPacketDropped(tPtr,0x11bfff81,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 			RR->t->incomingPacketDropped(tPtr,0x11bfff81,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
-			return;
+			return false;
 		}
 		}
 	}
 	}
 
 
@@ -495,8 +543,8 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 	// Get external surface address if present.
 	// Get external surface address if present.
 	if (ptr < packetSize) {
 	if (ptr < packetSize) {
 		if (pkt.rO(ptr,externalSurfaceAddress) < 0) {
 		if (pkt.rO(ptr,externalSurfaceAddress) < 0) {
-			RR->t->incomingPacketDropped(tPtr,0xf1000023,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
-			return;
+			RR->t->incomingPacketDropped(tPtr,0x10001003,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
+			return false;
 		}
 		}
 	}
 	}
 
 
@@ -506,7 +554,7 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 		// can't even get ephemeral public keys without first knowing the long term secret key,
 		// can't even get ephemeral public keys without first knowing the long term secret key,
 		// adding a little defense in depth.
 		// adding a little defense in depth.
 		uint8_t iv[8];
 		uint8_t iv[8];
-		for(int i=0;i<8;++i) iv[i] = pkt.b[i];
+		for (int i = 0; i < 8; ++i) iv[i] = pkt.b[i];
 		iv[7] &= 0xf8U;
 		iv[7] &= 0xf8U;
 		Salsa20 s20(key,iv);
 		Salsa20 s20(key,iv);
 		s20.crypt12(pkt.b + ptr,pkt.b + ptr,packetSize - ptr);
 		s20.crypt12(pkt.b + ptr,pkt.b + ptr,packetSize - ptr);
@@ -517,13 +565,13 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 			const void *const dictionaryBytes = pkt.b + ptr;
 			const void *const dictionaryBytes = pkt.b + ptr;
 			if ((ptr += (int)dictionarySize) > packetSize) {
 			if ((ptr += (int)dictionarySize) > packetSize) {
 				RR->t->incomingPacketDropped(tPtr,0x0d0f0112,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
 				RR->t->incomingPacketDropped(tPtr,0x0d0f0112,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
-				return;
+				return false;
 			}
 			}
 
 
 			ptr += pkt.rI16(ptr); // skip any additional fields, currently always 0
 			ptr += pkt.rI16(ptr); // skip any additional fields, currently always 0
 			if (ptr > packetSize) {
 			if (ptr > packetSize) {
 				RR->t->incomingPacketDropped(tPtr,0x451f2341,0,p.h.packetId,id,path->address(),0,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
 				RR->t->incomingPacketDropped(tPtr,0x451f2341,0,p.h.packetId,id,path->address(),0,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
-				return;
+				return false;
 			}
 			}
 
 
 			if ((ptr + ZT_SHA384_DIGEST_LEN) <= packetSize) {
 			if ((ptr + ZT_SHA384_DIGEST_LEN) <= packetSize) {
@@ -531,7 +579,7 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 				HMACSHA384(hmacKey,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,hmac);
 				HMACSHA384(hmacKey,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,hmac);
 				if (!Utils::secureEq(pkt.b + ptr,hmac,ZT_HMACSHA384_LEN)) {
 				if (!Utils::secureEq(pkt.b + ptr,hmac,ZT_HMACSHA384_LEN)) {
 					RR->t->incomingPacketDropped(tPtr,0x1000662a,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 					RR->t->incomingPacketDropped(tPtr,0x1000662a,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
-					return;
+					return false;
 				}
 				}
 				hmacAuthenticated = true;
 				hmacAuthenticated = true;
 			}
 			}
@@ -539,41 +587,41 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 			if (dictionarySize) {
 			if (dictionarySize) {
 				if (!nodeMetaData.decode(dictionaryBytes,dictionarySize)) {
 				if (!nodeMetaData.decode(dictionaryBytes,dictionarySize)) {
 					RR->t->incomingPacketDropped(tPtr,0x67192344,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
 					RR->t->incomingPacketDropped(tPtr,0x67192344,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
-					return;
+					return false;
 				}
 				}
 			}
 			}
 		}
 		}
 	}
 	}
 
 
-	// v2.x+ peers must include HMAC, older peers don't
-	if ((!hmacAuthenticated)&&(p.versionProtocol >= 11)) {
+	// v2.x+ peers must include HMAC, older peers don't (we'll drop support for them when 1.x is dead)
+	if ((!hmacAuthenticated) && (p.versionProtocol >= 11)) {
 		RR->t->incomingPacketDropped(tPtr,0x571feeea,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 		RR->t->incomingPacketDropped(tPtr,0x571feeea,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
-		return;
+		return false;
 	}
 	}
 
 
-	// Packet is fully decoded and has passed full HMAC (if present) ----------------------------------------------------
+	// Packet is fully decoded and has passed all tests -----------------------------------------------------------------
 
 
 	const int64_t now = RR->node->now();
 	const int64_t now = RR->node->now();
 
 
 	if (!peer) {
 	if (!peer) {
 		if (!RR->node->rateGateIdentityVerification(now,path->address())) {
 		if (!RR->node->rateGateIdentityVerification(now,path->address())) {
 			RR->t->incomingPacketDropped(tPtr,0xaffa9ff7,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
 			RR->t->incomingPacketDropped(tPtr,0xaffa9ff7,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
-			return;
+			return false;
 		}
 		}
 		if (!id.locallyValidate()) {
 		if (!id.locallyValidate()) {
 			RR->t->incomingPacketDropped(tPtr,0x2ff7a909,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
 			RR->t->incomingPacketDropped(tPtr,0x2ff7a909,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_HELLO,ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
-			return;
+			return false;
 		}
 		}
 		peer.set(new Peer(RR));
 		peer.set(new Peer(RR));
 		if (!peer)
 		if (!peer)
-			return;
-		peer->init(RR->identity,id);
+			return false;
+		peer->init(id);
 		peer = RR->topology->add(tPtr,peer);
 		peer = RR->topology->add(tPtr,peer);
 	}
 	}
 
 
 	// All validation steps complete, peer learned if not yet known -----------------------------------------------------
 	// All validation steps complete, peer learned if not yet known -----------------------------------------------------
 
 
-	if ((hops == 0)&&(externalSurfaceAddress))
+	if ((hops == 0) && (externalSurfaceAddress))
 		RR->sa->iam(tPtr,id,path->localSocket(),path->address(),externalSurfaceAddress,RR->topology->isRoot(id),now);
 		RR->sa->iam(tPtr,id,path->localSocket(),path->address(),externalSurfaceAddress,RR->topology->isRoot(id),now);
 
 
 	std::vector<uint8_t> myNodeMetaDataBin;
 	std::vector<uint8_t> myNodeMetaDataBin;
@@ -581,8 +629,10 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 		Dictionary myNodeMetaData;
 		Dictionary myNodeMetaData;
 		myNodeMetaData.encode(myNodeMetaDataBin);
 		myNodeMetaData.encode(myNodeMetaDataBin);
 	}
 	}
-	if (myNodeMetaDataBin.size() > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
-		return;
+	if (myNodeMetaDataBin.size() > ZT_PROTO_MAX_PACKET_LENGTH) {
+		RR->t->unexpectedError(tPtr,0xbc8861e0,"node meta-data dictionary exceeds maximum packet length while composing OK(HELLO) to %s",Trace::str(id.address(),path).s);
+		return false;
+	}
 
 
 	Buf outp;
 	Buf outp;
 	Protocol::OK::HELLO &ok = outp.as<Protocol::OK::HELLO>();
 	Protocol::OK::HELLO &ok = outp.as<Protocol::OK::HELLO>();
@@ -612,7 +662,7 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 		outp.wI(outl,(uint16_t)0); // length of additional fields, currently 0
 		outp.wI(outl,(uint16_t)0); // length of additional fields, currently 0
 
 
 		if ((outl + ZT_HMACSHA384_LEN) > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check, shouldn't be possible
 		if ((outl + ZT_HMACSHA384_LEN) > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check, shouldn't be possible
-			return;
+			return false;
 
 
 		KBKDFHMACSHA384(key,ZT_PROTO_KDF_KEY_LABEL_HELLO_HMAC,0,1,hmacKey); // iter == 1 for OK
 		KBKDFHMACSHA384(key,ZT_PROTO_KDF_KEY_LABEL_HELLO_HMAC,0,1,hmacKey); // iter == 1 for OK
 		HMACSHA384(hmacKey,outp.b + sizeof(ok.h),outl - sizeof(ok.h),outp.b + outl);
 		HMACSHA384(hmacKey,outp.b + sizeof(ok.h),outl - sizeof(ok.h),outp.b + outl);
@@ -623,39 +673,277 @@ void VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 	path->send(RR,tPtr,outp.b,outl,now);
 	path->send(RR,tPtr,outp.b,outl,now);
 
 
 	peer->setRemoteVersion(p.versionProtocol,p.versionMajor,p.versionMinor,Utils::ntoh(p.versionRev));
 	peer->setRemoteVersion(p.versionProtocol,p.versionMajor,p.versionMinor,Utils::ntoh(p.versionRev));
-	peer->received(tPtr,path,hops,p.h.packetId,packetSize - ZT_PROTO_PACKET_PAYLOAD_START,Protocol::VERB_HELLO,0,Protocol::VERB_NOP,0);
+
+	return true;
 }
 }
 
 
-void VL1::_ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 {
+	if (packetSize < sizeof(Protocol::ERROR::Header)) {
+		RR->t->incomingPacketDropped(tPtr,0x3beb1947,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_ERROR,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+		return false;
+	}
+	Protocol::ERROR::Header &eh = pkt.as<Protocol::ERROR::Header>();
+	switch(eh.error) {
+
+		//case Protocol::ERROR_INVALID_REQUEST:
+		//case Protocol::ERROR_BAD_PROTOCOL_VERSION:
+		//case Protocol::ERROR_CANNOT_DELIVER:
+		default:
+			break;
+
+		case Protocol::ERROR_OBJ_NOT_FOUND:
+			if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
+			}
+			break;
+
+		case Protocol::ERROR_UNSUPPORTED_OPERATION:
+			if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
+			}
+			break;
+
+		case Protocol::ERROR_NEED_MEMBERSHIP_CERTIFICATE:
+			break;
+
+		case Protocol::ERROR_NETWORK_ACCESS_DENIED_:
+			if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
+			}
+			break;
+
+	}
+	return true;
 }
 }
 
 
-void VL1::_OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 {
+	if (packetSize < sizeof(Protocol::OK::Header)) {
+		RR->t->incomingPacketDropped(tPtr,0x4c1f1ff7,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_OK,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+		return false;
+	}
+	Protocol::OK::Header &oh = pkt.as<Protocol::OK::Header>();
+	switch(oh.inReVerb) {
+
+		case Protocol::VERB_HELLO:
+			break;
+
+		case Protocol::VERB_WHOIS:
+			break;
+
+		case Protocol::VERB_NETWORK_CONFIG_REQUEST:
+			break;
+
+		case Protocol::VERB_MULTICAST_GATHER:
+			break;
+
+	}
+	return true;
 }
 }
 
 
-void VL1::_WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 {
+	if (packetSize < sizeof(Protocol::Header)) {
+		RR->t->incomingPacketDropped(tPtr,0x4c1f1ff7,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_WHOIS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+		return false;
+	}
+	Protocol::Header &ph = pkt.as<Protocol::Header>();
+
+	if (!peer->rateGateInboundWhoisRequest(RR->node->now())) {
+		RR->t->incomingPacketDropped(tPtr,0x19f7194a,ph.packetId,0,peer->identity(),path->address(),Protocol::packetHops(ph),Protocol::VERB_WHOIS,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
+		return true;
+	}
+
+	Buf outp;
+	Protocol::OK::WHOIS &outh = outp.as<Protocol::OK::WHOIS>();
+	int ptr = sizeof(Protocol::Header);
+	while ((ptr + ZT_ADDRESS_LENGTH) <= packetSize) {
+		outh.h.h.packetId = Protocol::getPacketId();
+		peer->address().copyTo(outh.h.h.destination);
+		RR->identity.address().copyTo(outh.h.h.source);
+		outh.h.h.flags = 0;
+		outh.h.h.verb = Protocol::VERB_OK;
+
+		outh.h.inReVerb = Protocol::VERB_WHOIS;
+		outh.h.inRePacketId = ph.packetId;
+
+		int outl = sizeof(Protocol::OK::WHOIS);
+		while ( ((ptr + ZT_ADDRESS_LENGTH) <= packetSize) && ((outl + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX) < ZT_PROTO_MAX_PACKET_LENGTH) ) {
+			const SharedPtr<Peer> &wp(RR->topology->peer(tPtr,Address(pkt.b + ptr)));
+			if (wp) {
+				outp.wO(outl,wp->identity());
+				if (peer->remoteVersionProtocol() >= 11) { // older versions don't know what a locator is
+					const Locator loc(wp->locator());
+					outp.wO(outl,loc);
+				}
+				if (Buf::writeOverflow(outl)) { // sanity check, shouldn't be possible
+					RR->t->unexpectedError(tPtr,0xabc0f183,"Buf write overflow building OK(WHOIS) to reply to %s",Trace::str(peer->address(),path).s);
+					return false;
+				}
+			}
+			ptr += ZT_ADDRESS_LENGTH;
+		}
+
+		if (outl > sizeof(Protocol::OK::WHOIS)) {
+			Protocol::armor(outp,outl,peer->key(),ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
+			path->send(RR,tPtr,outp.b,outl,RR->node->now());
+		}
+	}
+
+	return true;
 }
 }
 
 
-void VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 {
+	static uint16_t junk = 0;
+	if (RR->topology->isRoot(peer->identity())) {
+		if (packetSize < sizeof(Protocol::RENDEZVOUS)) {
+			RR->t->incomingPacketDropped(tPtr,0x43e90ab3,Protocol::packetId(pkt,packetSize),0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_RENDEZVOUS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+			return false;
+		}
+		Protocol::RENDEZVOUS &rdv = pkt.as<Protocol::RENDEZVOUS>();
+
+		const SharedPtr<Peer> with(RR->topology->peer(tPtr,Address(rdv.peerAddress)));
+		if (with) {
+			const unsigned int port = Utils::ntoh(rdv.port);
+			if (port != 0) {
+				switch(rdv.addressLength) {
+					case 4:
+						if ((sizeof(Protocol::RENDEZVOUS) + 4) <= packetSize) {
+							InetAddress atAddr(pkt.b + sizeof(Protocol::RENDEZVOUS),4,port);
+							++junk;
+							RR->node->putPacket(tPtr,path->localSocket(),atAddr,(const void *)&junk,2,2); // IPv4 "firewall opener" hack
+							with->sendHELLO(tPtr,path->localSocket(),atAddr,RR->node->now());
+							RR->t->tryingNewPath(tPtr,0x55a19aaa,with->identity(),atAddr,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->address(),peer->identity().hash(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
+						}
+						break;
+					case 16:
+						if ((sizeof(Protocol::RENDEZVOUS) + 16) <= packetSize) {
+							InetAddress atAddr(pkt.b + sizeof(Protocol::RENDEZVOUS),16,port);
+							with->sendHELLO(tPtr,path->localSocket(),atAddr,RR->node->now());
+							RR->t->tryingNewPath(tPtr,0x54bada09,with->identity(),atAddr,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->address(),peer->identity().hash(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
+						}
+						break;
+				}
+			}
+		}
+	}
+	return true;
 }
 }
 
 
-void VL1::_ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 {
+	const uint64_t packetId = Protocol::packetId(pkt,packetSize);
+	const uint64_t now = RR->node->now();
+	if (packetSize < sizeof(Protocol::Header)) {
+		RR->t->incomingPacketDropped(tPtr,0x14d70bb0,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+		return false;
+	}
+	if (peer->rateGateEchoRequest(now)) {
+		Buf outp;
+		Protocol::OK::ECHO &outh = outp.as<Protocol::OK::ECHO>();
+		outh.h.h.packetId = Protocol::getPacketId();
+		peer->address().copyTo(outh.h.h.destination);
+		RR->identity.address().copyTo(outh.h.h.source);
+		outh.h.h.flags = 0;
+		outh.h.h.verb = Protocol::VERB_OK;
+		outh.h.inReVerb = Protocol::VERB_ECHO;
+		outh.h.inRePacketId = packetId;
+		int outl = sizeof(Protocol::OK::ECHO);
+		outp.wB(outl,pkt.b + sizeof(Protocol::Header),packetSize - sizeof(Protocol::Header));
+
+		if (Buf::writeOverflow(outl)) {
+			RR->t->incomingPacketDropped(tPtr,0x14d70bb0,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+			return false;
+		}
+
+		Protocol::armor(outp,outl,peer->key(),ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012);
+		path->send(RR,tPtr,outp.b,outl,now);
+	} else {
+		RR->t->incomingPacketDropped(tPtr,0x27878bc1,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
+	}
+	return true;
 }
 }
 
 
-void VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 {
+	if (packetSize < sizeof(Protocol::PUSH_DIRECT_PATHS)) {
+		RR->t->incomingPacketDropped(tPtr,0x1bb1bbb1,Protocol::packetId(pkt,packetSize),0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+		return false;
+	}
+	Protocol::PUSH_DIRECT_PATHS &pdp = pkt.as<Protocol::PUSH_DIRECT_PATHS>();
+
+	const uint64_t now = RR->node->now();
+	if (!peer->rateGateInboundPushDirectPaths(now)) {
+		RR->t->incomingPacketDropped(tPtr,0x35b1aaaa,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
+		return true;
+	}
+
+	int ptr = sizeof(Protocol::PUSH_DIRECT_PATHS);
+	const unsigned int numPaths = Utils::ntoh(pdp.numPaths);
+	InetAddress a;
+	Endpoint ep;
+	for(unsigned int pi=0;pi<numPaths;++pi) {
+		/*const uint8_t flags = pkt.rI8(ptr);*/ ++ptr;
+		ptr += pkt.rI16(ptr); // extended attributes size, currently always 0
+		const unsigned int addrType = pkt.rI8(ptr);
+		const unsigned int addrRecordLen = pkt.rI8(ptr);
+		if (addrRecordLen == 0) {
+			RR->t->incomingPacketDropped(tPtr,0xaed00118,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+			return false;
+		}
+
+		const void *addrBytes = nullptr;
+		unsigned int addrLen = 0;
+		unsigned int addrPort = 0;
+		switch(addrType) {
+			case 0:
+				addrBytes = pkt.rBnc(ptr,addrRecordLen);
+				addrLen = addrRecordLen;
+				break;
+			case 4:
+				addrBytes = pkt.rBnc(ptr,4);
+				addrLen = 4;
+				addrPort = pkt.rI16(ptr);
+				break;
+			case 6:
+				addrBytes = pkt.rBnc(ptr,16);
+				addrLen = 16;
+				addrPort = pkt.rI16(ptr);
+				break;
+		}
+
+		if (Buf::readOverflow(ptr,packetSize)) {
+			RR->t->incomingPacketDropped(tPtr,0xbad0f10f,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+			return false;
+		}
+
+		if (addrPort) {
+			a.set(addrBytes,addrLen,addrPort);
+		} else if (addrLen) {
+			if (ep.unmarshal(reinterpret_cast<const uint8_t *>(addrBytes),(int)addrLen) <= 0) {
+				RR->t->incomingPacketDropped(tPtr,0xbad0f00d,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+				return false;
+			}
+			if ((ep.type() == Endpoint::INETADDR_V4)||(ep.type() == Endpoint::INETADDR_V6))
+				a = ep.inetAddr();
+		}
+
+		if (a) {
+		}
+
+		ptr += (int)addrRecordLen;
+	}
+
+	return true;
 }
 }
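_PUSH_DIRECT_PATHS() above walks a list of variable-length address records and checks Buf::readOverflow() after each one so that a bogus count or record length cannot read past the end of the packet. The same defensive pattern in standalone form is sketched below; the record format here (a one-byte length followed by that many payload bytes) is purely illustrative and is not the ZeroTier wire format.

#include <cstdint>
#include <cstdio>

// Parse length-prefixed records from a buffer, refusing to read past 'size'.
static bool parseRecords(const uint8_t *buf, unsigned int size)
{
	unsigned int ptr = 0;
	while (ptr < size) {
		const unsigned int recLen = buf[ptr++];
		if ((recLen == 0) || ((ptr + recLen) > size))
			return false; // malformed: zero-length record or length runs past the end
		std::printf("record of %u bytes at offset %u\n", recLen, ptr);
		ptr += recLen;
	}
	return true;
}

int main()
{
	const uint8_t ok[] = { 2, 0xaa, 0xbb, 1, 0xcc };
	const uint8_t bad[] = { 4, 0xaa }; // claims 4 bytes, only 1 present
	std::printf("ok packet: %s\n", parseRecords(ok, sizeof(ok)) ? "valid" : "rejected");
	std::printf("bad packet: %s\n", parseRecords(bad, sizeof(bad)) ? "valid" : "rejected");
	return 0;
}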
 
 
-void VL1::_USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 {
 }
 }
 
 
-void VL1::_ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL1::_ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 {
+	// TODO: not implemented yet
+	return true;
 }
 }
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier

+ 9 - 9
node/VL1.hpp

@@ -56,15 +56,15 @@ private:
 	void _sendPendingWhois(void *tPtr,int64_t now);
 	void _sendPendingWhois(void *tPtr,int64_t now);
 
 
 	// Handlers for VL1 verbs
 	// Handlers for VL1 verbs
-	void _HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
+	bool _HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
+	bool _ERROR(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _OK(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _USER_MESSAGE(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _ENCAP(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
 
 	const RuntimeEnvironment *RR;
 

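The VL1 verb handlers declared here now return bool rather than void, and all of them except _HELLO drop the separate authenticated flag. Judging from the _PUSH_DIRECT_PATHS body above, a handler reports malformed input by calling Trace::incomingPacketDropped() and returning false, and returns true otherwise (including when it merely rate-limits a request). The fragment below is a guess at how a dispatcher could consume that result; it is not the actual VL1 dispatch code, and the surrounding variables (verb, tPtr, path, peer, pkt, packetSize) are assumed to already be in scope.

// Hypothetical fragment, assumed to sit inside VL1's inbound packet dispatch.
// Only Protocol::VERB_PUSH_DIRECT_PATHS and the handler name are taken from the
// code above; the rest of the control flow illustrates the bool-return convention.
bool ok = true;
switch (verb) {
	case Protocol::VERB_PUSH_DIRECT_PATHS:
		ok = _PUSH_DIRECT_PATHS(tPtr,path,peer,pkt,packetSize);
		break;
	// ... other verbs dispatch to their handlers the same way ...
	default:
		break;
}
if (!ok) {
	// The handler already recorded the drop via Trace::incomingPacketDropped(),
	// so the caller simply stops here instead of crediting the peer/path with a
	// successfully parsed packet.
	return;
}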
+ 10 - 10
node/VL2.cpp

@@ -31,43 +31,43 @@ VL2::~VL2()
 {
 }
 
-void VL2::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
+bool VL2::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
 {
 }
 
-void VL2::_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 
-void VL2::_EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 
-void VL2::_MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 
-void VL2::_NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 
-void VL2::_NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 
-void VL2::_NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 
-void VL2::_MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 
-void VL2::_MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 
-void VL2::_MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated)
+bool VL2::_MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize)
 {
 }
 

+ 9 - 9
node/VL2.hpp

@@ -54,15 +54,15 @@ public:
 	void onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len);
 
 protected:
-	void _FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
-	void _MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize,bool authenticated);
+	bool _FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _EXT_FRAME(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _MULTICAST_LIKE(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _NETWORK_CREDENTIALS(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _NETWORK_CONFIG_REQUEST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _NETWORK_CONFIG(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _MULTICAST_GATHER(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _MULTICAST_FRAME_deprecated(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
+	bool _MULTICAST(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Buf &pkt,int packetSize);
 
 private:
 };

+ 2 - 2
osdep/ManagedRoute.hpp

@@ -17,7 +17,7 @@
 #include "../node/InetAddress.hpp"
 #include "../node/InetAddress.hpp"
 #include "../node/Utils.hpp"
 #include "../node/Utils.hpp"
 #include "../node/SharedPtr.hpp"
 #include "../node/SharedPtr.hpp"
-#include "../node/AtomicCounter.hpp"
+#include "../node/Atomic.hpp"
 
 #include <stdexcept>
 #include <vector>
@@ -83,7 +83,7 @@ private:
 	char _device[128];
 	char _systemDevice[128]; // for route overrides
 
-	AtomicCounter<int> __refCount;
+	Atomic<int> __refCount;
 };
 
 } // namespace ZeroTier

+ 2 - 2
root/root.cpp

@@ -122,7 +122,7 @@ using json = nlohmann::json;
  * RootPeer is a normal peer known to this root
  *
  * This struct must remain memcpy-able. Identity, InetAddress, and
- * AtomicCounter all satisfy this. Take care when adding fields that
+ * Atomic all satisfy this. Take care when adding fields that
  * this remains true.
  */
 struct RootPeer
@@ -140,7 +140,7 @@ struct RootPeer
 	int vProto;             // Protocol version or -1 if unknown
 	int vMajor,vMinor,vRev; // Peer version or -1,-1,-1 if unknown
 
-	AtomicCounter __refCount;
+	Atomic __refCount;
 };
 
 // Hashers for std::unordered_map

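node/Atomic.hpp itself is outside the files shown in this truncated diff. Going only by its use above (Atomic<int> __refCount in ManagedRoute.hpp and Atomic __refCount in root.cpp), a minimal shape for it would be a thin wrapper over std::atomic, roughly as sketched below. The member functions, memory orderings and the default template argument are assumptions made for illustration; the default argument is there only so the untemplatized usage in root.cpp reads consistently, and the real header may differ on all of these points.

// Hypothetical sketch of what node/Atomic.hpp's Atomic<T> might look like,
// consistent with the Atomic<int> reference-count usage above. This is not the
// actual ZeroTier header.
#include <atomic>

template<typename T = int>
class Atomic
{
public:
	explicit Atomic(const T v = T(0)) noexcept : _v(v) {}

	T load() const noexcept { return _v.load(std::memory_order_relaxed); }
	void store(const T v) noexcept { _v.store(v,std::memory_order_relaxed); }

	// Pre-increment/decrement returning the new value, the way a reference
	// count is typically bumped and released.
	T operator++() noexcept { return _v.fetch_add(1,std::memory_order_acq_rel) + 1; }
	T operator--() noexcept { return _v.fetch_sub(1,std::memory_order_acq_rel) - 1; }

	operator T() const noexcept { return load(); }

private:
	std::atomic<T> _v;
};

SharedPtr and ManagedRoute would presumably keep using operator++ and operator-- on __refCount much as they did with the old AtomicCounter.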
Some files were not shown because too many files changed in this diff