Browse Source

Create a container for identity hashes, replace some "volatile" hacky junk with std::atomic, clean up some security checks, rename raw data in Buf to unsafeData to permit search for unsafe.

Adam Ierymenko 5 years ago
parent
commit
1e0a930d23
18 changed files with 741 additions and 468 deletions
  1. 189 54
      node/Buf.hpp
  2. 1 0
      node/CMakeLists.txt
  3. 9 20
      node/Constants.hpp
  4. 94 106
      node/Endpoint.cpp
  5. 35 42
      node/Endpoint.hpp
  6. 86 0
      node/H.hpp
  7. 99 65
      node/Identity.cpp
  8. 21 8
      node/Identity.hpp
  9. 9 10
      node/Membership.cpp
  10. 6 7
      node/Network.hpp
  11. 9 7
      node/Peer.cpp
  12. 24 20
      node/Peer.hpp
  13. 7 7
      node/Protocol.cpp
  14. 13 9
      node/Protocol.hpp
  15. 27 18
      node/Topology.cpp
  16. 44 37
      node/Topology.hpp
  17. 6 6
      node/Trace.cpp
  18. 62 52
      node/VL1.cpp

+ 189 - 54
node/Buf.hpp

@@ -147,7 +147,7 @@ public:
 		if (l <= ZT_BUF_MEM_SIZE) {
 			r.b.move(s->b);
 			if (s->s > 0)
-				memmove(r.b->b,r.b->b + s->s,l);
+				memmove(r.b->unsafeData,r.b->unsafeData + s->s,l);
 			r.e = l;
 
 			while (++s != fcv.end()) {
@@ -157,7 +157,7 @@ public:
 					r.e = 0;
 					break;
 				}
-				memcpy(r.b->b + r.e,s->b->b + s->s,l);
+				memcpy(r.b->unsafeData + r.e,s->b->unsafeData + s->s,l);
 				s->b.zero(); // let go of buffer in vector as soon as possible
 				r.e += l;
 			}
@@ -171,12 +171,12 @@ public:
 	 */
 	ZT_ALWAYS_INLINE Buf() noexcept {}
 
-	ZT_ALWAYS_INLINE Buf(const Buf &b2) noexcept { memcpy(b,b2.b,ZT_BUF_MEM_SIZE); }
+	ZT_ALWAYS_INLINE Buf(const Buf &b2) noexcept { memcpy(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE); }
 
 	ZT_ALWAYS_INLINE Buf &operator=(const Buf &b2) noexcept
 	{
 		if (this != &b2)
-			memcpy(b,b2.b,ZT_BUF_MEM_SIZE);
+			memcpy(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE);
 		return *this;
 	}
 
@@ -206,29 +206,29 @@ public:
 	/**
 	 * Set all memory to zero
 	 */
-	ZT_ALWAYS_INLINE void clear() noexcept { memset(b,0,ZT_BUF_MEM_SIZE); }
+	ZT_ALWAYS_INLINE void clear() noexcept { memset(unsafeData,0,ZT_BUF_MEM_SIZE); }
 
 	/**
 	 * Zero security critical data using Utils::burn() to ensure it's never optimized out.
 	 */
-	ZT_ALWAYS_INLINE void burn() noexcept { Utils::burn(b,ZT_BUF_MEM_SIZE); }
+	ZT_ALWAYS_INLINE void burn() noexcept { Utils::burn(unsafeData,ZT_BUF_MEM_SIZE); }
 
 	/**
 	 * Read a byte
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by 1)
 	 * @return Byte (undefined on overflow)
 	 */
 	ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const noexcept
 	{
 		const int s = ii++;
-		return b[(unsigned int)s & ZT_BUF_MEM_MASK];
+		return unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK];
 	}
 
 	/**
 	 * Read a 16-bit integer
 	 *
-	 * @param ii Integer
+	 * @param ii Index value-result parameter (incremented by 2)
 	 * @return Integer (undefined on overflow)
 	 */
 	ZT_ALWAYS_INLINE uint16_t rI16(int &ii) const noexcept
@@ -240,14 +240,14 @@ public:
 			((uint16_t)data.bytes[s] << 8U) |
 			(uint16_t)data.bytes[s + 1]);
 #else
-		return Utils::ntoh(*reinterpret_cast<const uint16_t *>(b + s));
+		return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
 #endif
 	}
 
 	/**
 	 * Read a 32-bit integer
 	 *
-	 * @param ii Integer
+	 * @param ii Index value-result parameter (incremented by 4)
 	 * @return Integer (undefined on overflow)
 	 */
 	ZT_ALWAYS_INLINE uint32_t rI32(int &ii) const noexcept
@@ -261,14 +261,14 @@ public:
 			((uint32_t)data.bytes[s + 2] << 8U) |
 			(uint32_t)data.bytes[s + 3]);
 #else
-		return Utils::ntoh(*reinterpret_cast<const uint32_t *>(b + s));
+		return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
 #endif
 	}
 
 	/**
 	 * Read a 64-bit integer
 	 *
-	 * @param ii Integer
+	 * @param ii Index value-result parameter (incremented by 8)
 	 * @return Integer (undefined on overflow)
 	 */
 	ZT_ALWAYS_INLINE uint64_t rI64(int &ii) const noexcept
@@ -286,7 +286,7 @@ public:
 			((uint64_t)data.bytes[s + 6] << 8U) |
 			(uint64_t)data.bytes[s + 7]);
 #else
-		return Utils::ntoh(*reinterpret_cast<const uint64_t *>(b + s));
+		return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
 #endif
 	}
 
@@ -301,7 +301,7 @@ public:
 	 * indicates.
 	 *
 	 * @tparam T Object type
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by object's size in bytes)
 	 * @param obj Object to read
 	 * @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
 	 */
@@ -309,7 +309,7 @@ public:
 	ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const noexcept
 	{
 		if (ii < ZT_BUF_MEM_SIZE) {
-			int ms = obj.unmarshal(b + ii,ZT_BUF_MEM_SIZE - ii);
+			int ms = obj.unmarshal(unsafeData + ii,ZT_BUF_MEM_SIZE - ii);
 			if (ms > 0)
 				ii += ms;
 			return ms;
@@ -323,17 +323,17 @@ public:
 	 * Use this if the buffer's memory may get changed between reading and processing
 	 * what is read.
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by length of string)
 	 * @param buf Buffer to receive string
 	 * @param bufSize Capacity of buffer in bytes
 	 * @return Pointer to buf or NULL on overflow or error
 	 */
 	ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const noexcept
 	{
-		const char *const s = (const char *)(b + ii);
+		const char *const s = (const char *)(unsafeData + ii);
 		const int sii = ii;
 		while (ii < ZT_BUF_MEM_SIZE) {
-			if (b[ii++] == 0) {
+			if (unsafeData[ii++] == 0) {
 				memcpy(buf,s,ii - sii);
 				return buf;
 			}
@@ -351,14 +351,14 @@ public:
 	 * This version avoids a copy and so is faster if the buffer won't be modified between
 	 * reading and processing.
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by length of string)
 	 * @return Pointer to null-terminated C-style string or NULL on overflow or error
 	 */
 	ZT_ALWAYS_INLINE const char *rSnc(int &ii) const noexcept
 	{
-		const char *const s = (const char *)(b + ii);
+		const char *const s = (const char *)(unsafeData + ii);
 		while (ii < ZT_BUF_MEM_SIZE) {
-			if (b[ii++] == 0)
+			if (unsafeData[ii++] == 0)
 				return s;
 		}
 		return nullptr;
@@ -370,15 +370,15 @@ public:
 	 * Use this if the buffer's memory may get changed between reading and processing
 	 * what is read.
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by len)
 	 * @param bytes Buffer to contain data to read
 	 * @param len Length of buffer
 	 * @return Pointer to data or NULL on overflow or error
 	 */
-	ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *bytes,unsigned int len) const noexcept
+	ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *const bytes,const unsigned int len) const noexcept
 	{
+		// Save the field's starting offset BEFORE advancing the index. The previous
+		// code copied from (unsafeData + ii) after 'ii += len', which read the bytes
+		// *after* the field instead of the field itself (cf. rS()/rO(), which save
+		// the start offset first).
+		const int s = ii;
+		if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
+			memcpy(bytes,unsafeData + s,len);
 			return reinterpret_cast<uint8_t *>(bytes);
 		}
 		return nullptr;
@@ -393,7 +393,7 @@ public:
 	 * This version avoids a copy and so is faster if the buffer won't be modified between
 	 * reading and processing.
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by len)
 	 * @param len Length of data field to obtain a pointer to
 	 * @return Pointer to field or NULL on overflow
 	 */
@@ -403,25 +403,102 @@ public:
 		return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
 	}
 
+	/**
+	 * Load a value at an index without advancing the index
+	 *
+	 * Note that unlike the rI??() methods this does not increment ii and therefore
+	 * will not necessarily result in a 'true' return from readOverflow(). It does
+	 * however subject 'ii' to soft bounds masking like the rI??() methods.
+	 *
+	 * @param ii Index to load from (not modified by this call)
+	 * @return Byte at the (masked) index
+	 */
+	ZT_ALWAYS_INLINE uint8_t lI8(const int ii) const noexcept
+	{
+		return unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK];
+	}
+
+	/**
+	 * Load a 16-bit big-endian value at an index without advancing the index
+	 *
+	 * Note that unlike the rI??() methods this does not increment ii and therefore
+	 * will not necessarily result in a 'true' return from readOverflow(). It does
+	 * however subject 'ii' to soft bounds masking like the rI??() methods.
+	 *
+	 * @param ii Index to load from (not modified by this call)
+	 * @return 16-bit integer in host byte order
+	 */
+	ZT_ALWAYS_INLINE uint16_t lI16(const int ii) const noexcept
+	{
+		const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
+#ifdef ZT_NO_UNALIGNED_ACCESS
+		// Fixed: this branch referenced 'data.bytes', which is not a member of Buf;
+		// after this commit's rename the raw array is 'unsafeData'. The old code
+		// would not compile when ZT_NO_UNALIGNED_ACCESS is defined.
+		return (
+			((uint16_t)unsafeData[s] << 8U) |
+			(uint16_t)unsafeData[s + 1]);
+#else
+		return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + s));
+#endif
+	}
+
+	/**
+	 * Load a 32-bit big-endian value at an index without advancing the index
+	 *
+	 * Note that unlike the rI??() methods this does not increment ii and therefore
+	 * will not necessarily result in a 'true' return from readOverflow(). It does
+	 * however subject 'ii' to soft bounds masking like the rI??() methods.
+	 *
+	 * @param ii Index to load from (not modified by this call)
+	 * @return 32-bit integer in host byte order
+	 */
+	ZT_ALWAYS_INLINE uint32_t lI32(const int ii) const noexcept
+	{
+		const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
+#ifdef ZT_NO_UNALIGNED_ACCESS
+		// Fixed: 'data.bytes' is not a member of Buf (the raw array is 'unsafeData'
+		// after this commit's rename); the old code would not compile under
+		// ZT_NO_UNALIGNED_ACCESS.
+		return (
+			((uint32_t)unsafeData[s] << 24U) |
+			((uint32_t)unsafeData[s + 1] << 16U) |
+			((uint32_t)unsafeData[s + 2] << 8U) |
+			(uint32_t)unsafeData[s + 3]);
+#else
+		return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + s));
+#endif
+	}
+
+	/**
+	 * Load a 64-bit big-endian value at an index without advancing the index
+	 *
+	 * Note that unlike the rI??() methods this does not increment ii and therefore
+	 * will not necessarily result in a 'true' return from readOverflow(). It does
+	 * however subject 'ii' to soft bounds masking like the rI??() methods.
+	 *
+	 * @param ii Index to load from (not modified by this call)
+	 * @return 64-bit integer in host byte order
+	 */
+	ZT_ALWAYS_INLINE uint64_t lI64(const int ii) const noexcept
+	{
+		// Fixed: the return type was declared uint8_t (copy-paste from lI8()),
+		// silently truncating the loaded 64-bit value to its low byte.
+		const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
+#ifdef ZT_NO_UNALIGNED_ACCESS
+		// Fixed: 'data.bytes' is not a member of Buf; the raw array is 'unsafeData'.
+		return (
+			((uint64_t)unsafeData[s] << 56U) |
+			((uint64_t)unsafeData[s + 1] << 48U) |
+			((uint64_t)unsafeData[s + 2] << 40U) |
+			((uint64_t)unsafeData[s + 3] << 32U) |
+			((uint64_t)unsafeData[s + 4] << 24U) |
+			((uint64_t)unsafeData[s + 5] << 16U) |
+			((uint64_t)unsafeData[s + 6] << 8U) |
+			(uint64_t)unsafeData[s + 7]);
+#else
+		return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
+#endif
+	}
+
 	/**
 	 * Write a byte
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by 1)
 	 * @param n Byte
 	 */
-	ZT_ALWAYS_INLINE void wI(int &ii,uint8_t n) noexcept
+	ZT_ALWAYS_INLINE void wI8(int &ii,const uint8_t n) noexcept
 	{
 		const int s = ii++;
-		b[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
+		unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
 	}
 
 	/**
 	 * Write a 16-bit integer in big-endian byte order
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by 2)
 	 * @param n Integer
 	 */
-	ZT_ALWAYS_INLINE void wI(int &ii,uint16_t n) noexcept
+	ZT_ALWAYS_INLINE void wI16(int &ii,const uint16_t n) noexcept
 	{
 		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
 		ii += 2;
@@ -429,17 +506,17 @@ public:
 		b[s] = (uint8_t)(n >> 8U);
 		b[s + 1] = (uint8_t)n;
 #else
-		*reinterpret_cast<uint16_t *>(b + s) = Utils::hton(n);
+		*reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
 #endif
 	}
 
 	/**
 	 * Write a 32-bit integer in big-endian byte order
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by 4)
 	 * @param n Integer
 	 */
-	ZT_ALWAYS_INLINE void wI(int &ii,uint32_t n) noexcept
+	ZT_ALWAYS_INLINE void wI32(int &ii,const uint32_t n) noexcept
 	{
 		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
 		ii += 4;
@@ -449,17 +526,17 @@ public:
 		b[s + 2] = (uint8_t)(n >> 8U);
 		b[s + 3] = (uint8_t)n;
 #else
-		*reinterpret_cast<uint32_t *>(b + s) = Utils::hton(n);
+		*reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
 #endif
 	}
 
 	/**
 	 * Write a 64-bit integer in big-endian byte order
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by 8)
 	 * @param n Integer
 	 */
-	ZT_ALWAYS_INLINE void wI(int &ii,uint64_t n) noexcept
+	ZT_ALWAYS_INLINE void wI64(int &ii,const uint64_t n) noexcept
 	{
 		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
 		ii += 8;
@@ -473,7 +550,7 @@ public:
 		b[s + 6] = (uint8_t)(n >> 8U);
 		b[s + 7] = (uint8_t)n;
 #else
-		*reinterpret_cast<uint64_t *>(b + s) = Utils::hton(n);
+		*reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
 #endif
 	}
 
@@ -481,7 +558,7 @@ public:
 	 * Write an object implementing the marshal interface
 	 *
 	 * @tparam T Object type
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by size of object)
 	 * @param t Object to write
 	 */
 	template<typename T>
@@ -489,7 +566,7 @@ public:
 	{
 		const int s = ii;
 		if ((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE) {
-			int ms = t.marshal(b + s);
+			int ms = t.marshal(unsafeData + s);
 			if (ms > 0)
 				ii += ms;
 		} else {
@@ -500,7 +577,7 @@ public:
 	/**
 	 * Write a C-style null-terminated string (including the trailing zero)
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by length of string)
 	 * @param s String to write (writes an empty string if this is NULL)
 	 */
 	ZT_ALWAYS_INLINE void wS(int &ii,const char *s) noexcept
@@ -509,17 +586,17 @@ public:
 			char c;
 			do {
 				c = *(s++);
-				wI(ii,(uint8_t)c);
+				wI8(ii,(uint8_t)c);
 			} while (c);
 		} else {
-			wI(ii,(uint8_t)0);
+			wI8(ii,0);
 		}
 	}
 
 	/**
 	 * Write a byte array
 	 *
-	 * @param ii Iterator
+	 * @param ii Index value-result parameter (incremented by len)
 	 * @param bytes Bytes to write
 	 * @param len Size of data in bytes
 	 */
@@ -527,7 +604,65 @@ public:
 	{
 		const int s = ii;
 		if ((ii += (int)len) <= ZT_BUF_MEM_SIZE)
-			memcpy(b + s,bytes,len);
+			memcpy(unsafeData + s,bytes,len);
+	}
+
+	/**
+	 * Store a byte without advancing the index
+	 *
+	 * @param ii Index at which to store (soft bounds masked, not modified)
+	 * @param n Byte value to store
+	 */
+	ZT_ALWAYS_INLINE void sI8(const int ii,const uint8_t n) noexcept
+	{
+		unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK] = n;
+	}
+
+	/**
+	 * Store a 16-bit integer in big-endian byte order without advancing the index
+	 *
+	 * @param ii Index at which to store (soft bounds masked, not modified)
+	 * @param n Integer to store
+	 */
+	ZT_ALWAYS_INLINE void sI16(const int ii,const uint16_t n) noexcept
+	{
+		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
+#ifdef ZT_NO_UNALIGNED_ACCESS
+		// Fixed: this branch wrote to 'b[s]', but 'b' was renamed to 'unsafeData'
+		// in this same commit; the old code would not compile under
+		// ZT_NO_UNALIGNED_ACCESS.
+		unsafeData[s] = (uint8_t)(n >> 8U);
+		unsafeData[s + 1] = (uint8_t)n;
+#else
+		*reinterpret_cast<uint16_t *>(unsafeData + s) = Utils::hton(n);
+#endif
+	}
+
+	/**
+	 * Store a 32-bit integer in big-endian byte order without advancing the index
+	 *
+	 * @param ii Index at which to store (soft bounds masked, not modified)
+	 * @param n Integer to store
+	 */
+	ZT_ALWAYS_INLINE void sI32(const int ii,const uint32_t n) noexcept
+	{
+		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
+#ifdef ZT_NO_UNALIGNED_ACCESS
+		// Fixed: 'b' was renamed to 'unsafeData' in this same commit; the old
+		// code would not compile under ZT_NO_UNALIGNED_ACCESS.
+		unsafeData[s] = (uint8_t)(n >> 24U);
+		unsafeData[s + 1] = (uint8_t)(n >> 16U);
+		unsafeData[s + 2] = (uint8_t)(n >> 8U);
+		unsafeData[s + 3] = (uint8_t)n;
+#else
+		*reinterpret_cast<uint32_t *>(unsafeData + s) = Utils::hton(n);
+#endif
+	}
+
+	/**
+	 * Store a 64-bit integer in big-endian byte order without advancing the index
+	 *
+	 * @param ii Index at which to store (soft bounds masked, not modified)
+	 * @param n Integer to store
+	 */
+	ZT_ALWAYS_INLINE void sI64(const int ii,const uint64_t n) noexcept
+	{
+		const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
+#ifdef ZT_NO_UNALIGNED_ACCESS
+		// Fixed: 'b' was renamed to 'unsafeData' in this same commit; the old
+		// code would not compile under ZT_NO_UNALIGNED_ACCESS.
+		unsafeData[s] = (uint8_t)(n >> 56U);
+		unsafeData[s + 1] = (uint8_t)(n >> 48U);
+		unsafeData[s + 2] = (uint8_t)(n >> 40U);
+		unsafeData[s + 3] = (uint8_t)(n >> 32U);
+		unsafeData[s + 4] = (uint8_t)(n >> 24U);
+		unsafeData[s + 5] = (uint8_t)(n >> 16U);
+		unsafeData[s + 6] = (uint8_t)(n >> 8U);
+		unsafeData[s + 7] = (uint8_t)n;
+#else
+		*reinterpret_cast<uint64_t *>(unsafeData + s) = Utils::hton(n);
+#endif
+	}
 
 	/**
@@ -547,7 +682,7 @@ public:
 	 * @return Reference to 'b' cast to type T
 	 */
 	template<typename T>
-	ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) noexcept { return *reinterpret_cast<T *>(b + i); }
+	ZT_ALWAYS_INLINE T &as(const unsigned int i = 0) noexcept { return *reinterpret_cast<T *>(unsafeData + i); }
 
 	/**
 	 * Cast data in 'b' to a (usually packed) structure type (const)
@@ -561,14 +696,14 @@ public:
 	 * @return Reference to 'b' cast to type T
 	 */
 	template<typename T>
-	ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const noexcept { return *reinterpret_cast<const T *>(b + i); }
+	ZT_ALWAYS_INLINE const T &as(const unsigned int i = 0) const noexcept { return *reinterpret_cast<const T *>(unsafeData + i); }
 
-	ZT_ALWAYS_INLINE bool operator==(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) == 0); }
-	ZT_ALWAYS_INLINE bool operator!=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) != 0); }
-	ZT_ALWAYS_INLINE bool operator<(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) < 0); }
-	ZT_ALWAYS_INLINE bool operator<=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) <= 0); }
-	ZT_ALWAYS_INLINE bool operator>(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) > 0); }
-	ZT_ALWAYS_INLINE bool operator>=(const Buf &b2) const noexcept { return (memcmp(b,b2.b,ZT_BUF_MEM_SIZE) >= 0); }
+	ZT_ALWAYS_INLINE bool operator==(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) == 0); }
+	ZT_ALWAYS_INLINE bool operator!=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) != 0); }
+	ZT_ALWAYS_INLINE bool operator<(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) < 0); }
+	ZT_ALWAYS_INLINE bool operator<=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) <= 0); }
+	ZT_ALWAYS_INLINE bool operator>(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) > 0); }
+	ZT_ALWAYS_INLINE bool operator>=(const Buf &b2) const noexcept { return (memcmp(unsafeData,b2.unsafeData,ZT_BUF_MEM_SIZE) >= 0); }
 
 	/**
 	 * Raw data held in buffer
@@ -577,7 +712,7 @@ public:
 	 * They exist to allow reads and writes of integer types to silently overflow if a
 	 * read or write is performed at the end of the buffer.
 	 */
-	uint8_t b[ZT_BUF_MEM_SIZE + 8];
+	uint8_t unsafeData[ZT_BUF_MEM_SIZE + 8];
 
 private:
 	// Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise

+ 1 - 0
node/CMakeLists.txt

@@ -16,6 +16,7 @@ set(core_headers
 	ECC384.hpp
 	Expect.hpp
 	FCV.hpp
+	H.hpp
 	Hashtable.hpp
 	Identity.hpp
 	InetAddress.hpp

+ 9 - 20
node/Constants.hpp

@@ -66,16 +66,6 @@
  */
 #define ZT_MAX_NETWORK_CONFIG_BYTES 131072
 
-/**
- * Size of RX queue in packets
- */
-#define ZT_RX_QUEUE_SIZE 32
-
-/**
- * Size of TX queue in packets
- */
-#define ZT_TX_QUEUE_SIZE 32
-
 /**
  * Length of peer shared secrets (256-bit, do not change)
  */
@@ -109,16 +99,6 @@
  */
 #define ZT_WHOIS_RETRY_DELAY 500
 
-/**
- * Transmit queue entry timeout
- */
-#define ZT_TRANSMIT_QUEUE_TIMEOUT 5000
-
-/**
- * Receive queue entry timeout
- */
-#define ZT_RECEIVE_QUEUE_TIMEOUT 5000
-
 /**
  * Maximum number of ZT hops allowed (this is not IP hops/TTL)
  *
@@ -156,6 +136,15 @@
  */
 #define ZT_PEER_ACTIVITY_TIMEOUT (ZT_PEER_PING_PERIOD + 5000)
 
+/**
+ * Global timeout for peers in milliseconds
+ *
+ * This is global as in "entire world," and this value is 30 days. In this
+ * code the global timeout is used to determine when to ignore cached
+ * peers and their identity<>address mappings.
+ */
+#define ZT_PEER_GLOBAL_TIMEOUT 2592000000LL
+
 /**
  * Maximum interval between sort/prioritize of paths for a peer
  */

+ 94 - 106
node/Endpoint.cpp

@@ -19,14 +19,13 @@ bool Endpoint::operator==(const Endpoint &ep) const
 {
 	if (_t == ep._t) {
 		switch(_t) {
-			default:          return true;
-			case INETADDR_V4:
-			case INETADDR_V6: return (inetAddr() == ep.inetAddr());
-			case DNSNAME:     return ((_v.dns.port == ep._v.dns.port)&&(strcmp(_v.dns.name,ep._v.dns.name) == 0));
-			case ZEROTIER:    return ((_v.zt.a == ep._v.zt.a)&&(memcmp(_v.zt.idh,ep._v.zt.idh,sizeof(_v.zt.idh)) == 0));
-			case URL:         return (strcmp(_v.url,ep._v.url) == 0);
-			case ETHERNET:    return (_v.eth == ep._v.eth);
-			case WEBRTC:      return ((_v.webrtc.offerLen == ep._v.webrtc.offerLen)&&(memcmp(_v.webrtc.offer,ep._v.webrtc.offer,_v.webrtc.offerLen) == 0));
+			default:               return true;
+			case TYPE_ZEROTIER:    return ((_v.zt.a == ep._v.zt.a)&&(memcmp(_v.zt.idh,ep._v.zt.idh,sizeof(_v.zt.idh)) == 0));
+			case TYPE_DNSNAME:     return ((_v.dns.port == ep._v.dns.port)&&(strcmp(_v.dns.name,ep._v.dns.name) == 0));
+			case TYPE_URL:         return (strcmp(_v.url,ep._v.url) == 0);
+			case TYPE_ETHERNET:    return (_v.eth == ep._v.eth);
+			case TYPE_INETADDR_V4:
+			case TYPE_INETADDR_V6: return (inetAddr() == ep.inetAddr());
 		}
 	}
 	return false;
@@ -39,21 +38,15 @@ bool Endpoint::operator<(const Endpoint &ep) const
 	} else if (_t == ep._t) {
 		int ncmp;
 		switch(_t) {
-			case INETADDR_V4:
-			case INETADDR_V6:
-				return (inetAddr() < ep.inetAddr());
-			case DNSNAME:
+			case TYPE_ZEROTIER:    return (_v.zt.a < ep._v.zt.a) ? true : ((_v.zt.a == ep._v.zt.a)&&(memcmp(_v.zt.idh,ep._v.zt.idh,sizeof(_v.zt.idh)) < 0));
+			case TYPE_DNSNAME:
 				ncmp = strcmp(_v.dns.name,ep._v.dns.name);
-				return ((ncmp < 0) ? true : (ncmp == 0)&&(_v.dns.port < ep._v.dns.port));
-			case ZEROTIER: return (_v.zt.a < ep._v.zt.a) ? true : ((_v.zt.a == ep._v.zt.a)&&(memcmp(_v.zt.idh,ep._v.zt.idh,sizeof(_v.zt.idh)) < 0));
-			case URL:      return (strcmp(_v.url,ep._v.url) < 0);
-			case ETHERNET: return (_v.eth < ep._v.eth);
-			case WEBRTC:
-				if (_v.webrtc.offerLen < ep._v.webrtc.offerLen)
-					return true;
-				else if (_v.webrtc.offerLen == ep._v.webrtc.offerLen)
-					return memcmp(_v.webrtc.offer,ep._v.webrtc.offer,_v.webrtc.offerLen) < 0;
-			default:       return false;
+				                     return ((ncmp < 0) ? true : (ncmp == 0)&&(_v.dns.port < ep._v.dns.port));
+			case TYPE_URL:         return (strcmp(_v.url,ep._v.url) < 0);
+			case TYPE_ETHERNET:    return (_v.eth < ep._v.eth);
+			case TYPE_INETADDR_V4:
+			case TYPE_INETADDR_V6: return (inetAddr() < ep.inetAddr());
+			default:               return false;
 		}
 	}
 	return false;
@@ -67,10 +60,15 @@ int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept
 	Utils::storeBigEndian(data + 3,(int16_t)_l[1]);
 	Utils::storeBigEndian(data + 5,(int16_t)_l[2]);
 	switch(_t) {
-		case INETADDR_V4:
-		case INETADDR_V6:
-			return 7 + reinterpret_cast<const InetAddress *>(&_v.sa)->marshal(data+1);
-		case DNSNAME:
+		case TYPE_ZEROTIER:
+			data[7] = (uint8_t)(_v.zt.a >> 32U);
+			data[8] = (uint8_t)(_v.zt.a >> 24U);
+			data[9] = (uint8_t)(_v.zt.a >> 16U);
+			data[10] = (uint8_t)(_v.zt.a >> 8U);
+			data[11] = (uint8_t)_v.zt.a;
+			memcpy(data + 12,_v.zt.idh,ZT_IDENTITY_HASH_SIZE);
+			return ZT_IDENTITY_HASH_SIZE + 12;
+		case TYPE_DNSNAME:
 			p = 7;
 			for (;;) {
 				if ((data[p] = (uint8_t)_v.dns.name[p-1]) == 0)
@@ -82,15 +80,7 @@ int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept
 			data[p++] = (uint8_t)(_v.dns.port >> 8U);
 			data[p++] = (uint8_t)_v.dns.port;
 			return p;
-		case ZEROTIER:
-			data[7] = (uint8_t)(_v.zt.a >> 32U);
-			data[8] = (uint8_t)(_v.zt.a >> 24U);
-			data[9] = (uint8_t)(_v.zt.a >> 16U);
-			data[10] = (uint8_t)(_v.zt.a >> 8U);
-			data[11] = (uint8_t)_v.zt.a;
-			memcpy(data + 12,_v.zt.idh,ZT_IDENTITY_HASH_SIZE);
-			return ZT_IDENTITY_HASH_SIZE + 12;
-		case URL:
+		case TYPE_URL:
 			p = 7;
 			for (;;) {
 				if ((data[p] = (uint8_t)_v.url[p-1]) == 0)
@@ -100,7 +90,7 @@ int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept
 					return -1;
 			}
 			return p;
-		case ETHERNET:
+		case TYPE_ETHERNET:
 			data[7] = (uint8_t)(_v.eth >> 40U);
 			data[8] = (uint8_t)(_v.eth >> 32U);
 			data[9] = (uint8_t)(_v.eth >> 24U);
@@ -108,12 +98,14 @@ int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept
 			data[11] = (uint8_t)(_v.eth >> 8U);
 			data[12] = (uint8_t)_v.eth;
 			return 13;
-		case WEBRTC:
-			Utils::storeBigEndian(data + 7,(uint16_t)_v.webrtc.offerLen);
-			memcpy(data + 9,_v.webrtc.offer,_v.webrtc.offerLen);
-			return 9 + _v.webrtc.offerLen;
+		case TYPE_INETADDR_V4:
+		case TYPE_INETADDR_V6:
+			p = asInetAddress(_v.sa).marshal(data + 7);
+			if (p < 0)
+				return p;
+			return 7 + p;
 		default:
-			data[0] = (uint8_t)NIL;
+			data[0] = (uint8_t)TYPE_NIL;
 			return 7;
 	}
 }
@@ -128,75 +120,71 @@ int Endpoint::unmarshal(const uint8_t *restrict data,const int len) noexcept
 	_l[1] = Utils::loadBigEndian<int16_t>(data + 3);
 	_l[2] = Utils::loadBigEndian<int16_t>(data + 5);
   switch(_t) {
-		case NIL:
+		case TYPE_NIL:
 			return 7;
-		case INETADDR_V4:
-		case INETADDR_V6:
-			return 7 + reinterpret_cast<InetAddress *>(&_v.sa)->unmarshal(data+7,len-7);
-		case DNSNAME:
-			if (len < 10)
-				return -1;
-			p = 7;
-			for (;;) {
-				if ((_v.dns.name[p-1] = (char)data[p]) == 0) {
-					++p;
-					break;
-				}
-				++p;
-				if ((p >= (ZT_ENDPOINT_MARSHAL_SIZE_MAX-2))||(p >= (len-2)))
-					return -1;
-			}
-			_v.dns.port = (uint16_t)(((unsigned int)data[p++]) << 8U);
-			_v.dns.port |= (uint16_t)data[p++];
-			return p;
-		case ZEROTIER:
-			if (len < 60)
-				return -1;
-			_v.zt.a = ((uint64_t)data[7]) << 32U;
-			_v.zt.a |= ((uint64_t)data[8]) << 24U;
-			_v.zt.a |= ((uint64_t)data[9]) << 16U;
-			_v.zt.a |= ((uint64_t)data[10]) << 8U;
-			_v.zt.a |= (uint64_t)data[11];
-			memcpy(_v.zt.idh,data + 12,48);
-			return 60;
-		case URL:
-			if (len < 8)
-				return -1;
-			p = 7;
-			for (;;) {
-				if ((_v.url[p-1] = (char)data[p]) == 0) {
-					++p;
-					break;
-				}
-				++p;
-				if ((p >= (ZT_ENDPOINT_MAX_NAME_SIZE+1))||(p >= len))
-					return -1;
-			}
-			return p;
-		case ETHERNET:
-			if (len < 13)
+	  case TYPE_ZEROTIER:
+		  if (len < (12 + ZT_IDENTITY_HASH_SIZE))
+			  return -1;
+		  _v.zt.a = ((uint64_t)data[7]) << 32U;
+		  _v.zt.a |= ((uint64_t)data[8]) << 24U;
+		  _v.zt.a |= ((uint64_t)data[9]) << 16U;
+		  _v.zt.a |= ((uint64_t)data[10]) << 8U;
+		  _v.zt.a |= (uint64_t)data[11];
+		  memcpy(_v.zt.idh,data + 12,ZT_IDENTITY_HASH_SIZE);
+		  return 60;
+	  case TYPE_DNSNAME:
+		  if (len < 10)
+			  return -1;
+		  p = 7;
+		  for (;;) {
+			  if ((_v.dns.name[p-1] = (char)data[p]) == 0) {
+				  ++p;
+				  break;
+			  }
+			  ++p;
+			  if ((p >= (ZT_ENDPOINT_MARSHAL_SIZE_MAX-2))||(p >= (len-2)))
+				  return -1;
+		  }
+		  _v.dns.port = (uint16_t)(((unsigned int)data[p++]) << 8U);
+		  _v.dns.port |= (uint16_t)data[p++];
+		  return p;
+	  case TYPE_URL:
+		  if (len < 8)
+			  return -1;
+		  p = 7;
+		  for (;;) {
+			  if ((_v.url[p-1] = (char)data[p]) == 0) {
+				  ++p;
+				  break;
+			  }
+			  ++p;
+			  if ((p >= (ZT_ENDPOINT_MAX_NAME_SIZE+1))||(p >= len))
+				  return -1;
+		  }
+		  return p;
+	  case TYPE_ETHERNET:
+		  if (len < 13)
+			  return -1;
+		  _v.eth = ((uint64_t)data[7]) << 40U;
+		  _v.eth |= ((uint64_t)data[8]) << 32U;
+		  _v.eth |= ((uint64_t)data[9]) << 24U;
+		  _v.eth |= ((uint64_t)data[10]) << 16U;
+		  _v.eth |= ((uint64_t)data[11]) << 8U;
+		  _v.eth |= (uint64_t)data[12];
+		  return 13;
+		case TYPE_INETADDR_V4:
+		case TYPE_INETADDR_V6:
+			p = asInetAddress(_v.sa).unmarshal(data + 7,len - 7);
+			if (p <= 0)
 				return -1;
-			_v.eth = ((uint64_t)data[7]) << 40U;
-			_v.eth |= ((uint64_t)data[8]) << 32U;
-			_v.eth |= ((uint64_t)data[9]) << 24U;
-			_v.eth |= ((uint64_t)data[10]) << 16U;
-			_v.eth |= ((uint64_t)data[11]) << 8U;
-			_v.eth |= (uint64_t)data[12];
-			return 13;
-  	case WEBRTC:
-  		if (len < 9)
-  			return -1;
-  		_v.webrtc.offerLen = Utils::loadBigEndian<uint16_t>(data + 7);
-  		if ((len < (9 + _v.webrtc.offerLen))||(_v.webrtc.offerLen > ZT_ENDPOINT_MAX_NAME_SIZE))
-  			return -1;
-  		memcpy(_v.webrtc.offer,data + 9,_v.webrtc.offerLen);
-  		return 9 + _v.webrtc.offerLen;
+			return 7 + p;
 		default:
-			// Unrecognized endpoint types not yet specified must start with a byte
-			// length size so that older versions of ZeroTier can skip them.
-			if (len < 8)
+			// Unrecognized endpoint types not yet specified must start with a 16-bit
+			// length so that older versions of ZeroTier can skip them.
+			if (len < 9)
 				return -1;
-			return 8 + (int)data[7];
+			p = 9 + (int)Utils::loadBigEndian<uint16_t>(data + 7);
+			return (p > len) ? -1 : p;
 	}
 }
 

+ 35 - 42
node/Endpoint.hpp

@@ -39,99 +39,96 @@ namespace ZeroTier {
 class Endpoint : public TriviallyCopyable
 {
 public:
+	/**
+	 * Endpoint type
+	 */
 	enum Type
 	{
-		NIL =          0,   // NIL value
-		INETADDR_V4 =  1,   // IPv4
-		INETADDR_V6 =  2,   // IPv6
-		DNSNAME =      3,   // DNS name and port that resolves to InetAddress
-		ZEROTIER =     4,   // ZeroTier Address (for relaying and meshy behavior)
-		URL =          5,   // URL for http/https/ws/etc. (not implemented yet)
-		ETHERNET =     6,   // 48-bit LAN-local Ethernet address
-		WEBRTC =       7,   // WebRTC data channels
-		UNRECOGNIZED = 255  // Unrecognized endpoint type encountered in stream
+		TYPE_NIL =          0,          // NIL value
+		TYPE_ZEROTIER =     1,          // ZeroTier Address (for relaying and meshy behavior)
+		TYPE_DNSNAME =      2,          // DNS name and port that resolves to InetAddress
+		TYPE_URL =          3,          // URL for HTTP or Web Sockets transport
+		TYPE_INETADDR_V4 =  4,          // IPv4
+		TYPE_ETHERNET =     5,          // 48-bit LAN-local Ethernet address
+		TYPE_INETADDR_V6 =  6           // IPv6
 	};
 
 	ZT_ALWAYS_INLINE Endpoint() noexcept { memoryZero(this); }
 
-	explicit ZT_ALWAYS_INLINE Endpoint(const InetAddress &sa) { *this = sa; }
+	/**
+	 * Construct an endpoint from an IPv4 or IPv6 InetAddress
+	 *
+	 * Any other address family yields a TYPE_NIL endpoint.
+	 *
+	 * @param sa IP address (AF_INET or AF_INET6)
+	 */
+	ZT_ALWAYS_INLINE Endpoint(const InetAddress &sa)
+	{
+		switch (sa.ss_family) {
+			case AF_INET:
+				_t = TYPE_INETADDR_V4;
+				break;
+			case AF_INET6:
+				_t = TYPE_INETADDR_V6;
+				// Fixed: missing 'break' here fell through to 'default', which
+				// reset _t to TYPE_NIL and returned, silently discarding every
+				// IPv6 address. (The deleted operator= this replaced had the break.)
+				break;
+			default:
+				_t = TYPE_NIL;
+				return;
+		}
+		asInetAddress(_v.sa) = sa;
+	}
 
 	ZT_ALWAYS_INLINE Endpoint(const Address &zt,const uint8_t identityHash[ZT_IDENTITY_HASH_SIZE]) :
-		_t(ZEROTIER)
+		_t(TYPE_ZEROTIER)
 	{
 		_v.zt.a = zt.toInt();
 		memcpy(_v.zt.idh,identityHash,ZT_IDENTITY_HASH_SIZE);
 	}
 
 	ZT_ALWAYS_INLINE Endpoint(const char *name,const int port) :
-		_t(DNSNAME)
+		_t(TYPE_DNSNAME)
 	{
 		_v.dns.port = port;
 		Utils::scopy(_v.dns.name,sizeof(_v.dns.name),name);
 	}
 
 	explicit ZT_ALWAYS_INLINE Endpoint(const char *url) :
-		_t(URL)
+		_t(TYPE_URL)
 	{ Utils::scopy(_v.url,sizeof(_v.url),url); }
 
-	ZT_ALWAYS_INLINE Endpoint &operator=(const InetAddress &sa)
-	{
-		switch(sa.ss_family) {
-			case AF_INET:
-				_t = INETADDR_V4;
-				break;
-			case AF_INET6:
-				_t = INETADDR_V6;
-				break;
-			default:
-				_t = NIL;
-				return *this;
-		}
-		_v.sa = sa;
-		return *this;
-	}
-
 	/**
 	 * @return InetAddress or NIL if not of this type
 	 */
-	ZT_ALWAYS_INLINE const InetAddress &inetAddr() const noexcept { return ((_t == INETADDR_V4)||(_t == INETADDR_V6)) ? *reinterpret_cast<const InetAddress *>(&_v.sa) : InetAddress::NIL; }
+	ZT_ALWAYS_INLINE const InetAddress &inetAddr() const noexcept { return ((_t == TYPE_INETADDR_V4)||(_t == TYPE_INETADDR_V6)) ? asInetAddress(_v.sa) : InetAddress::NIL; }
 
 	/**
 	 * @return DNS name or empty string if not of this type
 	 */
-	ZT_ALWAYS_INLINE const char *dnsName() const noexcept { return (_t == DNSNAME) ? _v.dns.name : ""; }
+	ZT_ALWAYS_INLINE const char *dnsName() const noexcept { return (_t == TYPE_DNSNAME) ? _v.dns.name : ""; }
 
 	/**
 	 * @return Port associated with DNS name or -1 if not of this type
 	 */
-	ZT_ALWAYS_INLINE int dnsPort() const noexcept { return (_t == DNSNAME) ? _v.dns.port : -1; }
+	ZT_ALWAYS_INLINE int dnsPort() const noexcept { return (_t == TYPE_DNSNAME) ? _v.dns.port : -1; }
 
 	/**
 	 * @return ZeroTier address or NIL if not of this type
 	 */
-	ZT_ALWAYS_INLINE Address ztAddress() const noexcept { return Address((_t == ZEROTIER) ? _v.zt.a : (uint64_t)0); }
+	ZT_ALWAYS_INLINE Address ztAddress() const noexcept { return Address((_t == TYPE_ZEROTIER) ? _v.zt.a : (uint64_t)0); }
 
 	/**
 	 * @return 384-bit hash of identity keys or NULL if not of this type
 	 */
-	ZT_ALWAYS_INLINE const uint8_t *ztIdentityHash() const noexcept { return (_t == ZEROTIER) ? _v.zt.idh : nullptr; }
+	ZT_ALWAYS_INLINE const uint8_t *ztIdentityHash() const noexcept { return (_t == TYPE_ZEROTIER) ? _v.zt.idh : nullptr; }
 
 	/**
 	 * @return URL or empty string if not of this type
 	 */
-	ZT_ALWAYS_INLINE const char *url() const noexcept { return (_t == URL) ? _v.url : ""; }
+	ZT_ALWAYS_INLINE const char *url() const noexcept { return (_t == TYPE_URL) ? _v.url : ""; }
 
 	/**
 	 * @return Ethernet address or NIL if not of this type
 	 */
-	ZT_ALWAYS_INLINE MAC ethernet() const noexcept { return (_t == ETHERNET) ? MAC(_v.eth) : MAC(); }
+	ZT_ALWAYS_INLINE MAC ethernet() const noexcept { return (_t == TYPE_ETHERNET) ? MAC(_v.eth) : MAC(); }
 
 	/**
 	 * @return Endpoint type or NIL if unset/empty
 	 */
 	ZT_ALWAYS_INLINE Type type() const noexcept { return _t; }
 
-	explicit ZT_ALWAYS_INLINE operator bool() const noexcept { return _t != NIL; }
+	explicit ZT_ALWAYS_INLINE operator bool() const noexcept { return _t != TYPE_NIL; }
 
 	bool operator==(const Endpoint &ep) const;
 	ZT_ALWAYS_INLINE bool operator!=(const Endpoint &ep) const { return (!(*this == ep)); }
@@ -153,10 +150,6 @@ private:
 			uint16_t port;
 			char name[ZT_ENDPOINT_MAX_NAME_SIZE];
 		} dns;
-		struct {
-			uint16_t offerLen;
-			uint8_t offer[ZT_ENDPOINT_MAX_NAME_SIZE];
-		} webrtc;
 		struct {
 			uint64_t a;
 			uint8_t idh[ZT_IDENTITY_HASH_SIZE];

+ 86 - 0
node/H.hpp

@@ -0,0 +1,86 @@
+/*
+ * Copyright (c)2013-2020 ZeroTier, Inc.
+ *
+ * Use of this software is governed by the Business Source License included
+ * in the LICENSE.TXT file in the project's root directory.
+ *
+ * Change Date: 2024-01-01
+ *
+ * On the date above, in accordance with the Business Source License, use
+ * of this software will be governed by version 2.0 of the Apache License.
+ */
+/****/
+
+#ifndef ZT_H_HPP
+#define ZT_H_HPP
+
+#include "Constants.hpp"
+#include "TriviallyCopyable.hpp"
+
+namespace ZeroTier {
+
+/**
+ * Container for cryptographic hashes
+ *
+ * The size of the hash used with this container must be a multiple of 64 bits.
+ * Currently it's used as H<384> and H<512>.
+ *
+ * Warning: the [] operator is not bounds checked.
+ *
+ * @tparam BITS Bits in hash, must be a multiple of 64
+ */
+template<unsigned int BITS>
+class H : public TriviallyCopyable
+{
+public:
+	ZT_ALWAYS_INLINE H() noexcept {}
+
+	/**
+	 * @param h Hash value of size BITS / 8
+	 */
+	explicit ZT_ALWAYS_INLINE H(const void *h) noexcept { memcpy(_h,h,BITS / 8); }
+
+	/**
+	 * @param h Hash value of size BITS / 8
+	 */
+	ZT_ALWAYS_INLINE void set(const void *h) noexcept { memcpy(_h,h,BITS / 8); }
+
+	ZT_ALWAYS_INLINE void zero() noexcept
+	{
+		for(unsigned int i=0;i<(BITS / (sizeof(unsigned long) * 8));++i)
+			_h[i] = 0;
+	}
+
+	ZT_ALWAYS_INLINE uint8_t *data() noexcept { return reinterpret_cast<uint8_t *>(_h); }
+	ZT_ALWAYS_INLINE const uint8_t *data() const noexcept { return reinterpret_cast<const uint8_t *>(_h); }
+
+	ZT_ALWAYS_INLINE uint8_t operator[](const unsigned int i) const noexcept { return reinterpret_cast<const uint8_t *>(_h)[i]; }
+	ZT_ALWAYS_INLINE uint8_t &operator[](const unsigned int i) noexcept { return reinterpret_cast<uint8_t *>(_h)[i]; }
+
+	static constexpr unsigned int size() noexcept { return BITS / 8; }
+
+	ZT_ALWAYS_INLINE unsigned long hashCode() const noexcept { return _h[0]; }
+
+	ZT_ALWAYS_INLINE operator bool() const noexcept
+	{
+		for(unsigned int i=0;i<(BITS / (sizeof(unsigned long) * 8));++i) {
+			if (_h[i] != 0)
+				return true;
+		}
+		return false;
+	}
+
+	ZT_ALWAYS_INLINE bool operator==(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) == 0; }
+	ZT_ALWAYS_INLINE bool operator!=(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) != 0; }
+	ZT_ALWAYS_INLINE bool operator<(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) < 0; }
+	ZT_ALWAYS_INLINE bool operator>(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) > 0; }
+	ZT_ALWAYS_INLINE bool operator<=(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) <= 0; }
+	ZT_ALWAYS_INLINE bool operator>=(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) >= 0; }
+
+private:
+	// sizeof() yields bytes, so divide BITS by (bytes-per-long * 8) to get the
+	// element count; BITS / sizeof(unsigned long) would over-allocate 8x and
+	// desynchronize the loop bounds above from the intended BITS / 8 byte size.
+	unsigned long _h[BITS / (sizeof(unsigned long) * 8)];
+};
+
+} // namespace ZeroTier
+
+#endif

+ 99 - 65
node/Identity.cpp

@@ -93,7 +93,6 @@ bool Identity::generate(const Type t)
 
 	_type = t;
 	_hasPrivate = true;
-	_hash[0] = 0; // force hash recompute
 
 	switch(t) {
 		case C25519: {
@@ -103,32 +102,28 @@ bool Identity::generate(const Type t)
 				_address.setTo(digest + 59); // last 5 bytes are address
 			} while (_address.isReserved());
 			delete[] genmem;
+			_computeHash();
 		} break;
 
 		case P384: {
+			AES c;
 			do {
 				C25519::generate(_pub.c25519,_priv.c25519);
 				ECC384GenerateKey(_pub.p384,_priv.p384);
 
-				// This is just an intentionally complex hash function for use with a simple hashcash
-				// design to slow down identity generation as a defense in depth against brute force
-				// collision searches. V0 used a somewhat more overkill memory intensive design that's
-				// not really necessary and makes verifications too slow, so V1 uses this instead.
-
 				SHA384(digest,&_pub,sizeof(_pub));
-				AES c(digest);
-				SHA384(digest,digest,48);
-				std::sort(digest,digest + 48);
+				c.init(digest);
 				c.encrypt(digest,digest);
 				c.encrypt(digest + 16,digest + 16);
 				c.encrypt(digest + 32,digest + 32);
-				SHA384(digest,digest,48);
 
 				if (digest[47] != 0)
 					continue;
 
 				_address.setTo(digest);
 			} while (_address.isReserved());
+
+			_hash.set(digest); // P384 uses the same hash for hash() and address generation
 		} break;
 
 		default:
@@ -143,6 +138,7 @@ bool Identity::locallyValidate() const
 	if ((_address.isReserved())||(!_address))
 		return false;
 	switch (_type) {
+
 		case C25519:
 			try {
 				uint8_t digest[64];
@@ -153,51 +149,30 @@ bool Identity::locallyValidate() const
 			} catch ( ... ) {}
 			return false;
 
-		case P384: {
-			const uint8_t *hash = this->hash();
-			return ((hash[47] == 0)&&(Address(hash) == _address));
-		}
+		case P384:
+			return ((_hash[47] == 0)&&(Address(_hash.data()) == _address));
 
-		default:
-			return false;
 	}
+	return false;
 }
 
-const uint8_t *Identity::hash() const
+void Identity::hashWithPrivate(uint8_t h[48]) const
 {
-	uint8_t *const hash = const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(_hash));
-	switch(_type) {
-		default:
-			memset(hash,0,48);
-			break;
+	if (_hasPrivate) {
+		switch (_type) {
 
-		case C25519:
-			if (_hash[0] == 0)
-				SHA384(hash,_pub.c25519,ZT_C25519_PUBLIC_KEY_LEN);
-			break;
+			case C25519:
+				SHA384(h,_pub.c25519,ZT_C25519_PUBLIC_KEY_LEN,_priv.c25519,ZT_C25519_PRIVATE_KEY_LEN);
+				break;
 
-		case P384:
-			if (_hash[0] == 0) {
-				SHA384(hash,&_pub,sizeof(_pub));
-				AES c(hash);
-				std::sort(hash,hash + 48);
-				c.encrypt(hash,hash);
-				c.encrypt(hash + 16,hash + 16);
-				c.encrypt(hash + 32,hash + 32);
-				SHA384(hash,hash,48);
-			}
-			break;
-	}
-	return hash;
-}
+			case P384:
+				SHA384(h,&_pub,sizeof(_pub),&_priv,sizeof(_priv));
+				break;
 
-void Identity::hashWithPrivate(uint8_t h[48]) const
-{
-	switch(_type) {
-		case C25519: SHA384(h,_pub.c25519,ZT_C25519_PUBLIC_KEY_LEN,_priv.c25519,ZT_C25519_PRIVATE_KEY_LEN); break;
-		case P384: SHA384(h,&_pub,sizeof(_pub),&_priv,sizeof(_priv)); break;
-		default: memset(h,0,48);
+		}
+		return;
 	}
+	memset(h,0,48);
 }
 
 unsigned int Identity::sign(const void *data,unsigned int len,void *sig,unsigned int siglen) const
@@ -213,9 +188,11 @@ unsigned int Identity::sign(const void *data,unsigned int len,void *sig,unsigned
 
 			case P384:
 				if (siglen >= ZT_ECC384_SIGNATURE_SIZE) {
-					// When signing with P-384 we also include the C25519 public key in the hash.
+					// For P384 we sign SHA384(data | public keys) for added defense against any attack
+					// that attempted to decouple the two keys in some way. Otherwise this has no impact
+					// on the security of the signature (unless SHA384 had some serious flaw).
 					uint8_t h[48];
-					SHA384(h,data,len,_pub.c25519,ZT_C25519_PUBLIC_KEY_LEN);
+					SHA384(h,data,len,&_pub,ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
 					ECC384ECDSASign(_priv.p384,h,(uint8_t *)sig);
 					return ZT_ECC384_SIGNATURE_SIZE;
 				}
@@ -235,7 +212,7 @@ bool Identity::verify(const void *data,unsigned int len,const void *sig,unsigned
 		case P384:
 			if (siglen == ZT_ECC384_SIGNATURE_SIZE) {
 				uint8_t h[48];
-				SHA384(h,data,len,_pub.c25519,ZT_C25519_PUBLIC_KEY_LEN);
+				SHA384(h,data,len,&_pub,ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
 				return ECC384ECDSAVerify(_pub.p384,h,(const uint8_t *)sig);
 			}
 			break;
@@ -263,6 +240,11 @@ bool Identity::agree(const Identity &id,uint8_t key[ZT_PEER_SECRET_KEY_LENGTH])
 		} else if (_type == P384) {
 
 			if (id._type == P384) {
+				// For another P384 identity we execute DH agreement with BOTH keys and then
+				// hash the results together. For those (cough FIPS cough) who only consider
+				// P384 to be kosher, the C25519 secret can be considered a "salt"
+				// or something. For those who don't trust P384 this means the privacy of
+				// your traffic is also protected by C25519.
 				C25519::agree(_priv.c25519,id._pub.c25519,rawkey);
 				ECC384ECDH(id._pub.p384,_priv.p384,rawkey + ZT_C25519_SHARED_KEY_LEN);
 				SHA384(h,rawkey,ZT_C25519_SHARED_KEY_LEN + ZT_ECC384_SHARED_SECRET_SIZE);
@@ -283,13 +265,14 @@ bool Identity::agree(const Identity &id,uint8_t key[ZT_PEER_SECRET_KEY_LENGTH])
 
 char *Identity::toString(bool includePrivate,char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH]) const
 {
+	char *p = buf;
+	_address.toString(p);
+	p += 10;
+	*(p++) = ':';
+
 	switch(_type) {
 
 		case C25519: {
-			char *p = buf;
-			Utils::hex10(_address.toInt(),p);
-			p += 10;
-			*(p++) = ':';
 			*(p++) = '0';
 			*(p++) = ':';
 			Utils::hex(_pub.c25519,ZT_C25519_PUBLIC_KEY_LEN,p);
@@ -304,10 +287,6 @@ char *Identity::toString(bool includePrivate,char buf[ZT_IDENTITY_STRING_BUFFER_
 		}
 
 		case P384: {
-			char *p = buf;
-			Utils::hex10(_address.toInt(),p);
-			p += 10;
-			*(p++) = ':';
 			*(p++) = '1';
 			*(p++) = ':';
 			int el = Utils::b32e((const uint8_t *)(&_pub),sizeof(_pub),p,(int)(ZT_IDENTITY_STRING_BUFFER_LENGTH - (uintptr_t)(p - buf)));
@@ -324,13 +303,14 @@ char *Identity::toString(bool includePrivate,char buf[ZT_IDENTITY_STRING_BUFFER_
 		}
 
 	}
+
 	return nullptr;
 }
 
 bool Identity::fromString(const char *str)
 {
+	_hash.zero();
 	_hasPrivate = false;
-	_hash[0] = 0; // force hash recompute
 
 	if (!str) {
 		_address.zero();
@@ -421,6 +401,8 @@ bool Identity::fromString(const char *str)
 		return false;
 	}
 
+	_computeHash();
+
 	return true;
 }
 
@@ -430,50 +412,67 @@ int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX],const bool incl
 	switch(_type) {
 		case C25519:
 			data[ZT_ADDRESS_LENGTH] = (uint8_t)C25519;
+
 			memcpy(data + ZT_ADDRESS_LENGTH + 1,_pub.c25519,ZT_C25519_PUBLIC_KEY_LEN);
+
 			if ((includePrivate)&&(_hasPrivate)) {
 				data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN] = ZT_C25519_PRIVATE_KEY_LEN;
 				memcpy(data + ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1,_priv.c25519,ZT_C25519_PRIVATE_KEY_LEN);
-				return (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1 + ZT_C25519_PRIVATE_KEY_LEN);
+				return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1 + ZT_C25519_PRIVATE_KEY_LEN;
+			} else {
+				data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN] = 0;
+				return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1;
 			}
-			data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN] = 0;
-			return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1;
 
 		case P384:
 			data[ZT_ADDRESS_LENGTH] = (uint8_t)P384;
-			memcpy(data + 1 + ZT_ADDRESS_LENGTH,&_pub,ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
+
+			memcpy(data + ZT_ADDRESS_LENGTH + 1,&_pub,ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
+
 			if ((includePrivate)&&(_hasPrivate)) {
 				data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE] = ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
 				memcpy(data + ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1,&_priv,ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE);
 				return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1 + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
+			} else {
+				data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE] = 0;
+				return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1;
 			}
-			data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE] = 0;
-			return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1;
+
 	}
 	return -1;
 }
 
 int Identity::unmarshal(const uint8_t *data,const int len) noexcept
 {
+	_hash.zero();
+	_hasPrivate = false;
+
 	if (len < (ZT_ADDRESS_LENGTH + 1))
 		return -1;
-	_hash[0] = 0; // force hash recompute
+
 	unsigned int privlen;
 	switch((_type = (Type)data[ZT_ADDRESS_LENGTH])) {
 
 		case C25519:
 			if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1))
 				return -1;
+
 			memcpy(_pub.c25519,data + ZT_ADDRESS_LENGTH + 1,ZT_C25519_PUBLIC_KEY_LEN);
+
 			privlen = data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN];
 			if (privlen == ZT_C25519_PRIVATE_KEY_LEN) {
 				if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1 + ZT_C25519_PRIVATE_KEY_LEN))
 					return -1;
+
 				_hasPrivate = true;
 				memcpy(_priv.c25519,data + ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1,ZT_C25519_PRIVATE_KEY_LEN);
+
+				_computeHash();
 				return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1 + ZT_C25519_PRIVATE_KEY_LEN;
 			} else if (privlen == 0) {
 				_hasPrivate = false;
+
+				_computeHash();
 				return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1;
 			}
 			break;
@@ -481,26 +480,61 @@ int Identity::unmarshal(const uint8_t *data,const int len) noexcept
 		case P384:
 			if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1))
 				return -1;
+
 			memcpy(&_pub,data + ZT_ADDRESS_LENGTH + 1,ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
+
 			privlen = data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE];
 			if (privlen == ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE) {
 				if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1 + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE))
 					return -1;
+
 				_hasPrivate = true;
 				memcpy(&_priv,data + ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1,ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE);
+				_computeHash();
 				if (!this->locallyValidate()) // for P384 we do this always
 					return -1;
+
 				return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1 + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
 			} else if (privlen == 0) {
 				_hasPrivate = false;
+
+				_computeHash();
+				if (!this->locallyValidate()) // for P384 we do this always
+					return -1;
+
 				return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1;
 			}
 			break;
 
 	}
+
 	return -1;
 }
 
+void Identity::_computeHash()
+{
+	switch(_type) {
+		default:
+			_hash.zero();
+			break;
+
+		case C25519:
+			SHA384(_hash.data(),_pub.c25519,ZT_C25519_PUBLIC_KEY_LEN);
+			break;
+
+		case P384:
+			if (!_hash) {
+				uint8_t *const h = _hash.data();
+				SHA384(h,&_pub,sizeof(_pub));
+				AES c(h);
+				c.encrypt(h,h);
+				c.encrypt(h + 16,h + 16);
+				c.encrypt(h + 32,h + 32);
+			}
+			break;
+	}
+}
+
 } // namespace ZeroTier
 
 extern "C" {

+ 21 - 8
node/Identity.hpp

@@ -24,6 +24,7 @@
 #include "SHA512.hpp"
 #include "ECC384.hpp"
 #include "TriviallyCopyable.hpp"
+#include "H.hpp"
 
 #define ZT_IDENTITY_STRING_BUFFER_LENGTH 1024
 #define ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE (ZT_C25519_PUBLIC_KEY_LEN + ZT_ECC384_PUBLIC_KEY_SIZE)
@@ -96,7 +97,11 @@ public:
 	bool generate(Type t);
 
 	/**
-	 * Check the validity of this identity's pairing of key to address
+	 * Check the validity of this identity's address
+	 *
+	 * For type 0 identities this is slightly time consuming. For type 1 identities it's
+	 * instantaneous. It should be done when a new identity is accepted for the very first
+	 * time.
 	 *
 	 * @return True if validation check passes
 	 */
@@ -108,7 +113,7 @@ public:
 	ZT_ALWAYS_INLINE bool hasPrivate() const noexcept { return _hasPrivate; }
 
 	/**
-	 * This gets (computing if needed) a hash of this identity's public key(s).
+	 * Get hash of this identity's public key(s)
 	 *
 	 * The hash returned by this function differs by identity type. For C25519 (type 0)
 	 * identities this returns a simple SHA384 of the public key, which is NOT the same
@@ -117,12 +122,18 @@ public:
 	 * and address computation. This difference is because the v0 hash is expensive while
 	 * the v1 hash is fast.
 	 *
-	 * @return 384-bit/48-byte hash (pointer remains valid as long as Identity object exists)
+	 * While addresses can technically collide (though this is rare and hard to create),
+	 * the full hash of an identity's keys is unique to within cryptographic strength
+	 * bounds of the keys themselves.
+	 *
+	 * @return 384-bit/48-byte hash
 	 */
-	const uint8_t *hash() const;
+	ZT_ALWAYS_INLINE const H<384> &hash() const noexcept { return _hash; }
 
 	/**
-	 * Compute a hash of this identity's public and private keys
+	 * Compute a hash of this identity's public and private keys.
+	 *
+	 * If there is no private key or the identity is NIL the buffer is filled with zero.
 	 *
 	 * @param h Buffer to store SHA384 hash
 	 */
@@ -234,10 +245,10 @@ public:
 	int unmarshal(const uint8_t *data,int len) noexcept;
 
 private:
+	void _computeHash();
+
 	Address _address;
-	uint64_t _hash[6]; // hash of public key memo-ized for performance, recalculated when _hash[0] == 0
-	Type _type; // _type determines which fields in _priv and _pub are used
-	bool _hasPrivate;
+	H<384> _hash;
 	ZT_PACKED_STRUCT(struct { // don't re-order these
 		uint8_t c25519[ZT_C25519_PRIVATE_KEY_LEN];
 		uint8_t p384[ZT_ECC384_PRIVATE_KEY_SIZE];
@@ -246,6 +257,8 @@ private:
 		uint8_t c25519[ZT_C25519_PUBLIC_KEY_LEN]; // Curve25519 and Ed25519 public keys
 		uint8_t p384[ZT_ECC384_PUBLIC_KEY_SIZE];  // NIST P-384 public key
 	}) _pub;
+	Type _type; // _type determines which fields in _priv and _pub are used
+	bool _hasPrivate;
 };
 
 } // namespace ZeroTier

+ 9 - 10
node/Membership.cpp

@@ -60,10 +60,10 @@ void Membership::pushCredentials(const RuntimeEnvironment *RR,void *tPtr,const i
 			sendCom = false;
 			outp->wO(outl,nconf.com);
 		}
-		outp->wI(outl,(uint8_t)0);
+		outp->wI8(outl,0);
 
 		if ((outl + ZT_CAPABILITY_MARSHAL_SIZE_MAX + 2) < ZT_PROTO_MAX_PACKET_LENGTH) {
-			void *const capCountAt = outp->b + outl;
+			void *const capCountAt = outp->unsafeData + outl;
 			outl += 2;
 			unsigned int capCount = 0;
 			while (capPtr < nconf.capabilityCount) {
@@ -75,7 +75,7 @@ void Membership::pushCredentials(const RuntimeEnvironment *RR,void *tPtr,const i
 			Utils::storeBigEndian(capCountAt,(uint16_t)capCount);
 
 			if ((outl + ZT_TAG_MARSHAL_SIZE_MAX + 4) < ZT_PROTO_MAX_PACKET_LENGTH) {
-				void *const tagCountAt = outp->b + outl;
+				void *const tagCountAt = outp->unsafeData + outl;
 				outl += 2;
 				unsigned int tagCount = 0;
 				while (tagPtr < nconf.tagCount) {
@@ -86,10 +86,10 @@ void Membership::pushCredentials(const RuntimeEnvironment *RR,void *tPtr,const i
 				}
 				Utils::storeBigEndian(tagCountAt,(uint16_t)tagCount);
 
-				outp->wI(outl,(uint16_t)0); // no revocations sent here as these propagate differently
+				outp->wI16(outl,0); // no revocations sent here as these propagate differently
 
 				if ((outl + ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX + 2) < ZT_PROTO_MAX_PACKET_LENGTH) {
-					void *const cooCountAt = outp->b + outl;
+					void *const cooCountAt = outp->unsafeData + outl;
 					outl += 2;
 					unsigned int cooCount = 0;
 					while (cooPtr < nconf.certificateOfOwnershipCount) {
@@ -102,15 +102,14 @@ void Membership::pushCredentials(const RuntimeEnvironment *RR,void *tPtr,const i
 
 					complete = true;
 				} else {
-					outp->wI(outl,(uint16_t)0);
+					outp->wI16(outl,0);
 				}
 			} else {
-				outp->wI(outl,(uint16_t)0);
-				outp->wI(outl,(uint16_t)0);
-				outp->wI(outl,(uint16_t)0);
+				outp->wI32(outl,0);
+				outp->wI16(outl,0); // three zero 16-bit integers
 			}
 		} else {
-			outp->wI(outl,(uint64_t)0); // four zero 16-bit integers
+			outp->wI64(outl,0); // four zero 16-bit integers
 		}
 
 		if (outl > sizeof(Protocol::Header)) {

+ 6 - 7
node/Network.hpp

@@ -14,13 +14,6 @@
 #ifndef ZT_NETWORK_HPP
 #define ZT_NETWORK_HPP
 
-#include <cstdint>
-#include <string>
-#include <map>
-#include <vector>
-#include <algorithm>
-#include <stdexcept>
-
 #include "Constants.hpp"
 #include "Hashtable.hpp"
 #include "Address.hpp"
@@ -34,6 +27,12 @@
 #include "NetworkConfig.hpp"
 #include "CertificateOfMembership.hpp"
 
+#include <cstdint>
+#include <string>
+#include <map>
+#include <vector>
+#include <algorithm>
+
 #define ZT_NETWORK_MAX_INCOMING_UPDATES 3
 
 namespace ZeroTier {

+ 9 - 7
node/Peer.cpp

@@ -122,7 +122,7 @@ void Peer::received(
 			RR->t->learnedNewPath(tPtr,0x582fabdd,packetId,_id,path->address(),old);
 		} else {
 			if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id,path->localSocket(),path->address())) {
-				RR->t->tryingNewPath(tPtr,0xb7747ddd,_id,path->address(),path->address(),packetId,(uint8_t)verb,_id.address(),_id.hash(),ZT_TRACE_TRYING_NEW_PATH_REASON_PACKET_RECEIVED_FROM_UNKNOWN_PATH);
+				RR->t->tryingNewPath(tPtr,0xb7747ddd,_id,path->address(),path->address(),packetId,(uint8_t)verb,_id.address(),_id.hash().data(),ZT_TRACE_TRYING_NEW_PATH_REASON_PACKET_RECEIVED_FROM_UNKNOWN_PATH);
 				sendHELLO(tPtr,path->localSocket(),path->address(),now);
 				path->sent(now);
 			}
@@ -241,7 +241,7 @@ void Peer::sendNOP(void *tPtr,const int64_t localSocket,const InetAddress &atAdd
 	ph.flags = 0;
 	ph.verb = Protocol::VERB_NOP;
 	Protocol::armor(outp,sizeof(Protocol::Header),_key,this->cipher());
-	RR->node->putPacket(tPtr,localSocket,atAddress,outp.b,sizeof(Protocol::Header));
+	RR->node->putPacket(tPtr,localSocket,atAddress,outp.unsafeData,sizeof(Protocol::Header));
 }
 
 void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
@@ -337,24 +337,26 @@ void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
 
 void Peer::save(void *tPtr) const
 {
-	uint8_t *const buf = (uint8_t *)malloc(ZT_PEER_MARSHAL_SIZE_MAX);
+	uint8_t *const buf = (uint8_t *)malloc(8 + ZT_PEER_MARSHAL_SIZE_MAX);
 	if (!buf) return;
 
+	Utils::storeBigEndian<uint64_t>(buf,(uint64_t)RR->node->now());
+
 	_lock.rlock();
-	const int len = marshal(buf);
+	const int len = marshal(buf + 8);
 	_lock.runlock();
 
 	if (len > 0) {
 		uint64_t id[2];
 		id[0] = _id.address().toInt();
 		id[1] = 0;
-		RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len);
+		RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len + 8);
 	}
 
 	free(buf);
 }
 
-void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,const bool behindSymmetric,const bool bfg1024)
+void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,const bool bfg1024)
 {
 	static uint8_t junk = 0;
 
@@ -376,7 +378,7 @@ void Peer::contact(void *tPtr,const Endpoint &ep,const int64_t now,const bool be
 
 		// If the peer indicates that they may be behind a symmetric NAT and there are no
 		// living direct paths, try a few more aggressive things.
-		if ((behindSymmetric) && (phyAddr.ss_family == AF_INET) && (!direct(now))) {
+		if ((phyAddr.ss_family == AF_INET) && (!direct(now))) {
 			unsigned int port = phyAddr.port();
 			if ((bfg1024)&&(port < 1024)&&(RR->node->natMustDie())) {
 				// If the other side is using a low-numbered port and has elected to

+ 24 - 20
node/Peer.hpp

@@ -170,7 +170,11 @@ public:
 	/**
 	 * @return Bootstrap address or NULL if none
 	 */
-	ZT_ALWAYS_INLINE const Endpoint &bootstrap() const noexcept { return _bootstrap; }
+	// Return by value: the read lock is released when this function returns, so
+	// handing out a reference to _bootstrap would let callers read it unguarded
+	// while setBootstrap() concurrently overwrites it.
+	ZT_ALWAYS_INLINE Endpoint bootstrap() const noexcept
+	{
+		RWMutex::RLock l(_lock);
+		return _bootstrap;
+	}
 
 	/**
 	 * Set bootstrap endpoint
@@ -179,9 +183,8 @@ public:
 	 */
 	ZT_ALWAYS_INLINE void setBootstrap(const Endpoint &ep) noexcept
 	{
-		_lock.lock();
+		RWMutex::Lock l(_lock);
 		_bootstrap = ep;
-		_lock.unlock();
 	}
 
 	/**
@@ -315,17 +318,14 @@ public:
 	void save(void *tPtr) const;
 
 	/**
-	 * Attempt to contact this peer at a physical address
-	 *
-	 * This checks rate limits, path usability, sometimes deploys advanced NAT-t techniques, etc.
+	 * Attempt to contact this peer at a physical address, subject to internal checks
 	 *
 	 * @param tPtr External user pointer we pass around
 	 * @param ep Endpoint to attempt to contact
 	 * @param now Current time
-	 * @param behindSymmetric This peer may be behind a symmetric NAT (only meaningful for IPv4)
 	 * @param bfg1024 Use BFG1024 brute force symmetric NAT busting algorithm if applicable
 	 */
-	void contact(void *tPtr,const Endpoint &ep,int64_t now,bool behindSymmetric,bool bfg1024);
+	void contact(void *tPtr,const Endpoint &ep,int64_t now,bool bfg1024);
 
 	/**
 	 * Called by Node when an alarm set by this peer goes off
@@ -348,20 +348,24 @@ private:
 
 	const RuntimeEnvironment *RR;
 
-	volatile int64_t _lastReceive;
-	volatile int64_t _lastWhoisRequestReceived;
-	volatile int64_t _lastEchoRequestReceived;
-	volatile int64_t _lastPushDirectPathsReceived;
-	volatile int64_t _lastProbeReceived;
-	volatile int64_t _lastAttemptedP2PInit;
-	volatile int64_t _lastTriedStaticPath;
-	volatile int64_t _lastPrioritizedPaths;
-	volatile int64_t _lastAttemptedAggressiveNATTraversal;
-	volatile unsigned int _latency;
-
+	// The last time various things happened, for rate limiting and periodic events.
+	std::atomic<int64_t> _lastReceive;
+	std::atomic<int64_t> _lastWhoisRequestReceived;
+	std::atomic<int64_t> _lastEchoRequestReceived;
+	std::atomic<int64_t> _lastPushDirectPathsReceived;
+	std::atomic<int64_t> _lastProbeReceived;
+	std::atomic<int64_t> _lastAttemptedP2PInit;
+	std::atomic<int64_t> _lastTriedStaticPath;
+	std::atomic<int64_t> _lastPrioritizedPaths;
+	std::atomic<int64_t> _lastAttemptedAggressiveNATTraversal;
+
+	// Latency in milliseconds
+	std::atomic<unsigned int> _latency;
+
+	// For SharedPtr<>
 	std::atomic<int> __refCount;
 
-	// Lock for non-volatile read/write fields
+	// Read/write mutex for non-atomic non-const fields.
 	RWMutex _lock;
 
 	// Number of paths current alive as of last _prioritizePaths

+ 7 - 7
node/Protocol.cpp

@@ -65,8 +65,8 @@ volatile uintptr_t _checkSizesIMeanIt = _checkSizes();
 uint64_t createProbe(const Identity &sender,const Identity &recipient,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH]) noexcept
 {
 	uint8_t tmp[ZT_IDENTITY_HASH_SIZE + ZT_IDENTITY_HASH_SIZE];
-	memcpy(tmp,sender.hash(),ZT_IDENTITY_HASH_SIZE);
-	memcpy(tmp + ZT_IDENTITY_HASH_SIZE,recipient.hash(),ZT_IDENTITY_HASH_SIZE);
+	memcpy(tmp,sender.hash().data(),ZT_IDENTITY_HASH_SIZE);
+	memcpy(tmp + ZT_IDENTITY_HASH_SIZE,recipient.hash().data(),ZT_IDENTITY_HASH_SIZE);
 	uint64_t hash[6];
 	SHA384(hash,tmp,sizeof(tmp),key,ZT_PEER_SECRET_KEY_LENGTH);
 	return hash[0];
@@ -98,7 +98,7 @@ void armor(Buf &pkt,int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],
 			// only difference here is that we don't encrypt the payload
 
 			uint64_t mac[2];
-			poly1305(mac,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
+			poly1305(mac,pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
 			ph.mac = mac[0];
 		} break;
 
@@ -111,10 +111,10 @@ void armor(Buf &pkt,int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],
 			s20.crypt12(Utils::ZERO256,macKey,ZT_POLY1305_KEY_LEN);
 
 			const unsigned int encLen = packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START;
-			s20.crypt12(pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,encLen);
+			s20.crypt12(pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,encLen);
 
 			uint64_t mac[2];
-			poly1305(mac,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,encLen,macKey);
+			poly1305(mac,pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,encLen,macKey);
 			ph.mac = mac[0];
 		} break;
 
@@ -132,9 +132,9 @@ int compress(SharedPtr<Buf> &pkt,int packetSize) noexcept
 	if (!pkt2) return packetSize;
 
 	const int uncompressedLen = packetSize - ZT_PROTO_PACKET_PAYLOAD_START;
-	const int compressedLen = LZ4_compress_fast(reinterpret_cast<const char *>(pkt->b + ZT_PROTO_PACKET_PAYLOAD_START),reinterpret_cast<char *>(pkt2->b + ZT_PROTO_PACKET_PAYLOAD_START),uncompressedLen,ZT_BUF_MEM_SIZE - ZT_PROTO_PACKET_PAYLOAD_START);
+	const int compressedLen = LZ4_compress_fast(reinterpret_cast<const char *>(pkt->unsafeData + ZT_PROTO_PACKET_PAYLOAD_START),reinterpret_cast<char *>(pkt2->unsafeData + ZT_PROTO_PACKET_PAYLOAD_START),uncompressedLen,ZT_BUF_MEM_SIZE - ZT_PROTO_PACKET_PAYLOAD_START);
 	if ((compressedLen > 0)&&(compressedLen < uncompressedLen)) {
-		memcpy(pkt2->b,pkt->b,ZT_PROTO_PACKET_PAYLOAD_START);
+		memcpy(pkt2->unsafeData,pkt->unsafeData,ZT_PROTO_PACKET_PAYLOAD_START);
 		pkt.swap(pkt2);
 		pkt->as<Protocol::Header>().verb |= ZT_PROTO_VERB_FLAG_COMPRESSED;
 		return compressedLen + ZT_PROTO_PACKET_PAYLOAD_START;

+ 13 - 9
node/Protocol.hpp

@@ -640,13 +640,17 @@ enum Verb
 	 *   <[...] paths>
 	 *
 	 * Path record format:
-	 *   <[1] 8-bit path flags (always 0, currently unused)>
+	 *   <[1] 8-bit path flags>
 	 *   <[2] length of extended path characteristics or 0 for none>
 	 *   <[...] extended path characteristics>
 	 *   <[1] address type>
-	 *   <[1] address length in bytes>
+	 *   <[1] address record length in bytes>
 	 *   <[...] address>
 	 *
+	 * Path flags:
+	 *   0x01 - Sender is likely behind a symmetric NAT
+	 *   0x02 - Use BFG1024 algorithm for symmetric NAT-t if conditions met
+	 *
 	 * The receiver may, upon receiving a push, attempt to establish a
 	 * direct link to one or more of the indicated addresses. It is the
 	 * responsibility of the sender to limit which peers it pushes direct
@@ -965,21 +969,21 @@ ZT_PACKED_STRUCT(struct UNSUPPORTED_OPERATION__NETWORK_CONFIG_REQUEST
  * @param packetSize Packet's actual size in bytes
  * @return Packet ID or 0 if packet size is less than 8
  */
-ZT_ALWAYS_INLINE uint64_t packetId(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize >= 8) ? Utils::loadBigEndian<uint64_t>(pkt.b) : 0ULL; }
+ZT_ALWAYS_INLINE uint64_t packetId(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize >= 8) ? Utils::loadBigEndian<uint64_t>(pkt.unsafeData) : 0ULL; }
 
 /**
  * @param Packet to extract hops from
  * @param packetSize Packet's actual size in bytes
  * @return 3-bit hops field embedded in packet flags field
  */
+ZT_ALWAYS_INLINE uint8_t packetHops(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize > ZT_PROTO_PACKET_FLAGS_INDEX) ? (pkt.unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] & ZT_PROTO_FLAG_FIELD_HOPS_MASK) : 0; }
+ZT_ALWAYS_INLINE uint8_t packetHops(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize >= ZT_PROTO_PACKET_FLAGS_INDEX) ? (pkt.unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] & ZT_PROTO_FLAG_FIELD_HOPS_MASK) : 0; }
 
 /**
  * @param Packet to extract cipher ID from
  * @param packetSize Packet's actual size in bytes
  * @return 3-bit cipher field embedded in packet flags field
  */
+ZT_ALWAYS_INLINE uint8_t packetCipher(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize > ZT_PROTO_PACKET_FLAGS_INDEX) ? ((pkt.unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] >> 3U) & 0x07U) : 0; }
+ZT_ALWAYS_INLINE uint8_t packetCipher(const Buf &pkt,const unsigned int packetSize) noexcept { return (packetSize >= ZT_PROTO_PACKET_FLAGS_INDEX) ? ((pkt.unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] >> 3U) & 0x07U) : 0; }
 
 /**
  * @return 3-bit hops field embedded in packet flags field
@@ -1009,14 +1013,14 @@ ZT_ALWAYS_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const
 	for(int i=0;i<18;++i)
 		out[i] = in[i] ^ packet.b[i];
 #else
-	*reinterpret_cast<uint64_t *>(out) = *reinterpret_cast<const uint64_t *>(in) ^ *reinterpret_cast<const uint64_t *>(packet.b);
-	*reinterpret_cast<uint64_t *>(out + 8) = *reinterpret_cast<const uint64_t *>(in + 8) ^ *reinterpret_cast<const uint64_t *>(packet.b + 8);
-	*reinterpret_cast<uint16_t *>(out + 16) = *reinterpret_cast<const uint16_t *>(in + 16) ^ *reinterpret_cast<const uint16_t *>(packet.b + 16);
+	*reinterpret_cast<uint64_t *>(out) = *reinterpret_cast<const uint64_t *>(in) ^ *reinterpret_cast<const uint64_t *>(packet.unsafeData);
+	*reinterpret_cast<uint64_t *>(out + 8) = *reinterpret_cast<const uint64_t *>(in + 8) ^ *reinterpret_cast<const uint64_t *>(packet.unsafeData + 8);
+	*reinterpret_cast<uint16_t *>(out + 16) = *reinterpret_cast<const uint16_t *>(in + 16) ^ *reinterpret_cast<const uint16_t *>(packet.unsafeData + 16);
 #endif
 
 	// Flags, but with hop count masked off. Hop count is altered by forwarding
 	// nodes and is the only field that is mutable by unauthenticated third parties.
-	out[18] = in[18] ^ (packet.b[18] & 0xf8U);
+	out[18] = in[18] ^ (packet.unsafeData[18] & 0xf8U);
 
 	// Raw packet size in bytes -- thus each packet size defines a new key space.
 	out[19] = in[19] ^ (uint8_t)packetSize;

+ 27 - 18
node/Topology.cpp

@@ -38,8 +38,10 @@ Topology::Topology(const RuntimeEnvironment *renv,const Identity &myId,void *tPt
 	RR(renv),
 	_myIdentity(myId),
 	_numConfiguredPhysicalPaths(0),
-	_peers(128),
-	_paths(256)
+	_peers(256),
+	_peersByIncomingProbe(256),
+	_peersByIdentityHash(256),
+	_paths(1024)
 {
 	uint64_t idtmp[2]; idtmp[0] = 0; idtmp[1] = 0;
 	std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_ROOTS,idtmp));
@@ -65,6 +67,9 @@ Topology::Topology(const RuntimeEnvironment *renv,const Identity &myId,void *tPt
 			p->init(*r);
 		}
 		_rootPeers.push_back(p);
+		_peers[p->address()] = p;
+		_peersByIncomingProbe[p->incomingProbe()] = p;
+		_peersByIdentityHash[p->identity().hash()] = p;
 	}
 }
 
@@ -83,11 +88,13 @@ SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
 	_loadCached(tPtr,peer->address(),hp);
 	if (hp) {
 		_peersByIncomingProbe[peer->incomingProbe()] = hp;
+		_peersByIdentityHash[peer->identity().hash()] = hp;
 		return hp;
 	}
 
 	hp = peer;
 	_peersByIncomingProbe[peer->incomingProbe()] = peer;
+	_peersByIdentityHash[peer->identity().hash()] = peer;
 
 	return peer;
 }
@@ -150,6 +157,8 @@ void Topology::addRoot(void *tPtr,const Identity &id,const InetAddress &bootstra
 			p->init(id);
 			if (bootstrap)
 				p->setBootstrap(Endpoint(bootstrap));
+			_peersByIncomingProbe[p->incomingProbe()] = p;
+			_peersByIdentityHash[p->identity().hash()] = p;
 		}
 		_rootPeers.push_back(p);
 
@@ -204,6 +213,7 @@ void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
 			if ( (!(*p)->alive(now)) && (_roots.count((*p)->identity()) == 0) ) {
 				(*p)->save(tPtr);
 				_peersByIncomingProbe.erase((*p)->incomingProbe());
+				_peersByIdentityHash.erase((*p)->identity().hash());
 				_peers.erase(*a);
 			}
 		}
@@ -226,11 +236,8 @@ void Topology::saveAll(void *tPtr)
 	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
 	Address *a = nullptr;
 	SharedPtr<Peer> *p = nullptr;
-	while (i.next(a,p)) {
-		if ( (!(*p)->alive(RR->node->now())) && (_roots.count((*p)->identity()) == 0) ) {
-			(*p)->save((void *)0);
-		}
-	}
+	while (i.next(a,p))
+		(*p)->save(tPtr);
 }
 
 void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
@@ -240,19 +247,21 @@ void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
 		id[0] = zta.toInt();
 		id[1] = 0;
 		std::vector<uint8_t> data(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_PEER,id));
-		if (!data.empty()) {
+		if (data.size() > 8) {
 			const uint8_t *d = data.data();
 			int dl = (int)data.size();
-			for (;;) {
-				Peer *const p = new Peer(RR);
-				int n = p->unmarshal(d,dl);
-				if (n > 0) {
-					// TODO: will eventually handle multiple peers
-					peer.set(p);
-					return;
-				} else {
-					delete p;
-				}
+
+			const int64_t ts = (int64_t)Utils::loadBigEndian<uint64_t>(d);
+			Peer *const p = new Peer(RR);
+			int n = p->unmarshal(d + 8,dl - 8);
+			if (n < 0) {
+				delete p;
+				return;
+			}
+			if ((RR->node->now() - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
+				// TODO: handle many peers, same address (?)
+				peer.set(p);
+				return;
 			}
 		}
 	} catch ( ... ) {

+ 44 - 37
node/Topology.hpp

@@ -30,6 +30,7 @@
 #include "Hashtable.hpp"
 #include "SharedPtr.hpp"
 #include "ScopedPtr.hpp"
+#include "H.hpp"
 
 namespace ZeroTier {
 
@@ -66,24 +67,40 @@ public:
 	ZT_ALWAYS_INLINE SharedPtr<Peer> peer(void *tPtr,const Address &zta,const bool loadFromCached = true)
 	{
 		{
-			RWMutex::RLock _l(_peers_l);
+			RWMutex::RLock l(_peers_l);
 			const SharedPtr<Peer> *const ap = _peers.get(zta);
 			if (ap)
 				return *ap;
 		}
-
-		SharedPtr<Peer> p;
-		if (loadFromCached) {
-			_loadCached(tPtr,zta,p);
-			if (p) {
-				RWMutex::Lock _l(_peers_l);
-				SharedPtr<Peer> &hp = _peers[zta];
-				if (!hp)
+		{
+			SharedPtr<Peer> p;
+			if (loadFromCached) {
+				_loadCached(tPtr,zta,p);
+				if (p) {
+					RWMutex::Lock l(_peers_l);
+					SharedPtr<Peer> &hp = _peers[zta];
+					if (hp)
+						return hp;
 					hp = p;
+				}
 			}
+			return p;
 		}
+	}
 
-		return p;
+	/**
+	 * Get a peer by its 384-bit identity public key hash
+	 *
+	 * @param hash Identity hash
+	 * @return Peer or NULL if no peer is currently in memory for this hash (cache is not checked in this case)
+	 */
+	ZT_ALWAYS_INLINE SharedPtr<Peer> peerByHash(const H<384> &hash)
+	{
+		RWMutex::RLock _l(_peers_l);
+		const SharedPtr<Peer> *const ap = _peersByIdentityHash.get(hash);
+		if (ap)
+			return *ap;
+		return SharedPtr<Peer>();
 	}
 
 	/**
@@ -111,29 +128,21 @@ public:
 	ZT_ALWAYS_INLINE SharedPtr<Path> path(const int64_t l,const InetAddress &r)
 	{
 		const uint64_t k = _pathHash(l,r);
-
-		_paths_l.rlock();
-		SharedPtr<Path> p(_paths[k]);
-		_paths_l.runlock();
-		if (p)
-			return p;
-
-		_paths_l.lock();
-		SharedPtr<Path> &p2 = _paths[k];
-		if (p2) {
-			p = p2;
-		} else {
-			try {
-				p.set(new Path(l,r));
-			} catch ( ... ) {
-				_paths_l.unlock();
-				return SharedPtr<Path>();
-			}
+		{
+			RWMutex::RLock lck(_paths_l);
+			SharedPtr<Path> *const p = _paths.get(k);
+			if (p)
+				return *p;
+		}
+		{
+			SharedPtr<Path> p(new Path(l,r));
+			RWMutex::Lock lck(_paths_l);
+			SharedPtr<Path> &p2 = _paths[k];
+			if (p2)
+				return p2;
 			p2 = p;
+			return p;
 		}
-		_paths_l.unlock();
-
-		return p;
 	}
 
 	/**
@@ -173,9 +182,8 @@ public:
 		Hashtable< Address,SharedPtr<Peer> >::Iterator i(const_cast<Topology *>(this)->_peers);
 		Address *a = nullptr;
 		SharedPtr<Peer> *p = nullptr;
-		while (i.next(a,p)) {
+		while (i.next(a,p))
 			f(*((const SharedPtr<Peer> *)p));
-		}
 	}
 
 	/**
@@ -202,9 +210,8 @@ public:
 			Hashtable< Address,SharedPtr<Peer> >::Iterator i(const_cast<Topology *>(this)->_peers);
 			Address *a = nullptr;
 			SharedPtr<Peer> *p = nullptr;
-			while (i.next(a,p)) {
+			while (i.next(a,p))
 				f(*((const SharedPtr<Peer> *)p),std::binary_search(rootPeerPtrs.begin(),rootPeerPtrs.end(),(uintptr_t)p->ptr()));
-			}
 		} catch ( ... ) {} // should not throw
 	}
 
@@ -221,9 +228,8 @@ public:
 		Hashtable< uint64_t,SharedPtr<Path> >::Iterator i(const_cast<Topology *>(this)->_paths);
 		uint64_t *k = nullptr;
 		SharedPtr<Path> *p = nullptr;
-		while (i.next(k,p)) {
+		while (i.next(k,p))
 			f(*((const SharedPtr<Path> *)p));
-		}
 	}
 
 	/**
@@ -359,6 +365,7 @@ private:
 
 	Hashtable< Address,SharedPtr<Peer> > _peers;
 	Hashtable< uint64_t,SharedPtr<Peer> > _peersByIncomingProbe;
+	Hashtable< H<384>,SharedPtr<Peer> > _peersByIdentityHash;
 	Hashtable< uint64_t,SharedPtr<Path> > _paths;
 	std::set< Identity > _roots; // locked by _peers_l
 	std::vector< SharedPtr<Peer> > _rootPeers; // locked by _peers_l

+ 6 - 6
node/Trace.cpp

@@ -118,15 +118,15 @@ void Trace::_tryingNewPath(
 	ev.evType = ZT_CONST_TO_BE_UINT16(ZT_TRACE_VL1_TRYING_NEW_PATH);
 	ev.codeLocation = Utils::hton(codeLocation);
 	ev.address = Utils::hton(trying.address().toInt());
-	memcpy(ev.identityHash,trying.hash(),48);
+	memcpy(ev.identityHash,trying.hash().data(),48);
 	physicalAddress.forTrace(ev.physicalAddress);
 	triggerAddress.forTrace(ev.triggerAddress);
 	ev.triggeringPacketId = triggeringPacketId;
 	ev.triggeringPacketVerb = triggeringPacketVerb;
 	ev.triggeredByAddress = Utils::hton(triggeredByAddress);
 	if (triggeredByIdentityHash)
-		memcpy(ev.triggeredByIdentityHash,triggeredByIdentityHash,48);
-	else memset(ev.triggeredByIdentityHash,0,48);
+		memcpy(ev.triggeredByIdentityHash,triggeredByIdentityHash,ZT_IDENTITY_HASH_SIZE);
+	else memset(ev.triggeredByIdentityHash,0,ZT_IDENTITY_HASH_SIZE);
 	ev.reason = (uint8_t)reason;
 	RR->node->postEvent(tPtr,ZT_EVENT_TRACE,&ev);
 }
@@ -145,7 +145,7 @@ void Trace::_learnedNewPath(
 	ev.codeLocation = Utils::hton(codeLocation);
 	ev.packetId = packetId; // packet IDs are kept in big-endian
 	ev.address = Utils::hton(peerIdentity.address().toInt());
-	memcpy(ev.identityHash,peerIdentity.hash(),48);
+	memcpy(ev.identityHash,peerIdentity.hash().data(),ZT_IDENTITY_HASH_SIZE);
 	physicalAddress.forTrace(ev.physicalAddress);
 	replaced.forTrace(ev.replaced);
 
@@ -171,10 +171,10 @@ void Trace::_incomingPacketDropped(
 	ev.networkId = Utils::hton(networkId);
 	if (peerIdentity) {
 		ev.address = Utils::hton(peerIdentity.address().toInt());
-		memcpy(ev.identityHash,peerIdentity.hash(),48);
+		memcpy(ev.identityHash,peerIdentity.hash().data(),ZT_IDENTITY_HASH_SIZE);
 	} else {
 		ev.address = 0;
-		memset(ev.identityHash,0,48);
+		memset(ev.identityHash,0,ZT_IDENTITY_HASH_SIZE);
 	}
 	physicalAddress.forTrace(ev.physicalAddress);
 	ev.hops = hops;

+ 62 - 52
node/VL1.cpp

@@ -68,7 +68,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 		// but we might as well avoid it. When the peer receives NOP on a path that hasn't been handshaked yet
 		// it will send its own HELLO to which we will respond with a fully encrypted OK(HELLO).
 		if (len == ZT_PROTO_PROBE_LENGTH) {
-			const SharedPtr<Peer> peer(RR->topology->peerByProbe(Utils::loadAsIsEndian<uint64_t>(data->b)));
+			const SharedPtr<Peer> peer(RR->topology->peerByProbe(data->lI64(0)));
 			if ((peer)&&(peer->rateGateInboundProbe(now))) {
 				peer->sendNOP(tPtr,path->localSocket(),path->address(),now);
 				path->sent(now);
@@ -90,7 +90,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 		// Destination address of packet (filled below)
 		Address destination;
 
-		if (data->b[ZT_PROTO_PACKET_FRAGMENT_INDICATOR_INDEX] == ZT_PROTO_PACKET_FRAGMENT_INDICATOR) {
+		if (data->lI8(ZT_PROTO_PACKET_FRAGMENT_INDICATOR_INDEX) == ZT_PROTO_PACKET_FRAGMENT_INDICATOR) {
 			// Fragment -----------------------------------------------------------------------------------------------------
 
 			const Protocol::FragmentHeader &fragmentHeader = data->as<Protocol::FragmentHeader>();
@@ -234,7 +234,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 
 					// Verify packet MAC.
 					uint64_t mac[2];
-					poly1305(mac,pkt.b->b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
+					poly1305(mac,pkt.b->unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
 					if (ph->mac != mac[0]) {
 						RR->t->incomingPacketDropped(tPtr,0xcc89c812,ph->packetId,0,peer->identity(),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 						return;
@@ -268,7 +268,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 							for(i=0;i<prevOverflow;++i) {
 								if (s->s >= s->e)
 									goto next_slice;
-								ps->b->b[ps->e++] = s->b->b[s->s++]; // move from head of current to end of previous
+								ps->b->unsafeData[ps->e++] = s->b->unsafeData[s->s++]; // move from head of current to end of previous
 							}
 							next_slice: ps = s++;
 						}
@@ -277,18 +277,18 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 					// Simultaneously decrypt and assemble packet into a contiguous buffer.
 					// Since we moved data around above all slices will have sizes that are
 					// multiples of 64.
-					memcpy(pkt.b->b,ph,sizeof(Protocol::Header));
+					memcpy(pkt.b->unsafeData,ph,sizeof(Protocol::Header));
 					pkt.e = sizeof(Protocol::Header);
 					for(FCV<Buf::Slice,ZT_MAX_PACKET_FRAGMENTS>::iterator s(pktv.begin());s!=pktv.end();++s) {
 						const unsigned int sliceSize = s->e - s->s;
-						s20.crypt12(s->b->b + s->s,pkt.b->b + pkt.e,sliceSize);
+						s20.crypt12(s->b->unsafeData + s->s,pkt.b->unsafeData + pkt.e,sliceSize);
 						pkt.e += sliceSize;
 					}
 					ph = &(pkt.b->as<Protocol::Header>());
 
 					// Verify packet MAC.
 					uint64_t mac[2];
-					poly1305(mac,pkt.b->b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
+					poly1305(mac,pkt.b->unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
 					if (ph->mac != mac[0]) {
 						RR->t->incomingPacketDropped(tPtr,0xbc881231,ph->packetId,0,peer->identity(),path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 						return;
@@ -353,8 +353,8 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 
 			SharedPtr<Buf> nb(new Buf());
 			const int uncompressedLen = LZ4_decompress_safe(
-				reinterpret_cast<const char *>(pkt.b->b + ZT_PROTO_PACKET_PAYLOAD_START),
-				reinterpret_cast<char *>(nb->b),
+				reinterpret_cast<const char *>(pkt.b->unsafeData + ZT_PROTO_PACKET_PAYLOAD_START),
+				reinterpret_cast<char *>(nb->unsafeData),
 				(int)(packetSize - ZT_PROTO_PACKET_PAYLOAD_START),
 				ZT_BUF_MEM_SIZE - ZT_PROTO_PACKET_PAYLOAD_START);
 
@@ -402,7 +402,7 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 			case Protocol::VERB_MULTICAST:                  ok = RR->vl2->_MULTICAST(tPtr,path,peer,*pkt.b,(int)packetSize); break;
 			case Protocol::VERB_ENCAP:                      ok = _ENCAP(tPtr,path,peer,*pkt.b,(int)packetSize); break;
 			default:
-				RR->t->incomingPacketDropped(tPtr,0xdeadeff0,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_UNRECOGNIZED_VERB);
+				RR->t->incomingPacketDropped(tPtr,0xeeeeeff0,ph->packetId,0,identityFromPeerPtr(peer),path->address(),hops,verb,ZT_TRACE_PACKET_DROP_REASON_UNRECOGNIZED_VERB);
 				break;
 		}
 		if (ok)
@@ -414,10 +414,10 @@ void VL1::onRemotePacket(void *const tPtr,const int64_t localSocket,const InetAd
 
 void VL1::_relay(void *tPtr,const SharedPtr<Path> &path,const Address &destination,SharedPtr<Buf> &data,unsigned int len)
 {
-	const uint8_t newHopCount = (data->b[ZT_PROTO_PACKET_FLAGS_INDEX] & 7U) + 1;
+	const uint8_t newHopCount = (data->lI8(ZT_PROTO_PACKET_FLAGS_INDEX) & 7U) + 1;
 	if (newHopCount >= ZT_RELAY_MAX_HOPS)
 		return;
-	data->b[ZT_PROTO_PACKET_FLAGS_INDEX] = (data->b[ZT_PROTO_PACKET_FLAGS_INDEX] & 0xf8U) | newHopCount;
+	data->sI8(ZT_PROTO_PACKET_FLAGS_INDEX,(data->lI8(ZT_PROTO_PACKET_FLAGS_INDEX) & 0xf8U) | newHopCount);
 
 	const SharedPtr<Peer> toPeer(RR->topology->peer(tPtr,destination,false));
 	if (!toPeer)
@@ -427,7 +427,7 @@ void VL1::_relay(void *tPtr,const SharedPtr<Path> &path,const Address &destinati
 	if (!toPath)
 		return;
 
-	toPath->send(RR,tPtr,data->b,len,now);
+	toPath->send(RR,tPtr,data->unsafeData,len,now);
 }
 
 void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
@@ -467,15 +467,15 @@ void VL1::_sendPendingWhois(void *const tPtr,const int64_t now)
 
 		int outl = sizeof(Protocol::Header);
 		while ((a != toSend.end())&&(outl < ZT_PROTO_MAX_PACKET_LENGTH)) {
-			a->copyTo(outp.b + outl);
+			a->copyTo(outp.unsafeData + outl);
 			++a;
 			outl += ZT_ADDRESS_LENGTH;
 		}
 
 		if (outl > sizeof(Protocol::Header)) {
-			Protocol::armor(outp,outl,root->key(),peer->cipher());
+			Protocol::armor(outp,outl,root->key(),root->cipher());
 			RR->expect->sending(ph.packetId,now);
-			rootPath->send(RR,tPtr,outp.b,outl,now);
+			rootPath->send(RR,tPtr,outp.unsafeData,outl,now);
 		}
 	}
 }
@@ -525,7 +525,7 @@ bool VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 		Protocol::salsa2012DeriveKey(peer->key(),perPacketKey,pkt,packetSize);
 		Salsa20(perPacketKey,&p.h.packetId).crypt12(Utils::ZERO256,macKey,ZT_POLY1305_KEY_LEN);
 		uint64_t mac[2];
-		poly1305(mac,pkt.b + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
+		poly1305(mac,pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
 		if (p.h.mac != mac[0]) {
 			RR->t->incomingPacketDropped(tPtr,0x11bfff81,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 			return false;
@@ -542,8 +542,8 @@ bool VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 		}
 		packetSize -= ZT_HMACSHA384_LEN;
 		KBKDFHMACSHA384(key,ZT_PROTO_KDF_KEY_LABEL_HELLO_HMAC,0,0,hmacKey); // iter == 0 for HELLO, 1 for OK(HELLO)
-		HMACSHA384(hmacKey,pkt.b,packetSize,hmac);
-		if (!Utils::secureEq(pkt.b + packetSize,hmac,ZT_HMACSHA384_LEN)) {
+		HMACSHA384(hmacKey,pkt.unsafeData,packetSize,hmac);
+		if (!Utils::secureEq(pkt.unsafeData + packetSize,hmac,ZT_HMACSHA384_LEN)) {
 			RR->t->incomingPacketDropped(tPtr,0x1000662a,p.h.packetId,0,id,path->address(),hops,Protocol::VERB_NOP,ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
 			return false;
 		}
@@ -568,10 +568,10 @@ bool VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 		// can't even get ephemeral public keys without first knowing the long term secret key,
 		// adding a little defense in depth.
 		uint8_t iv[8];
-		for (int i = 0; i < 8; ++i) iv[i] = pkt.b[i];
+		for (int i = 0; i < 8; ++i) iv[i] = pkt.unsafeData[i];
 		iv[7] &= 0xf8U; // this exists for pure legacy reasons, meh...
 		Salsa20 s20(key,iv);
-		s20.crypt12(pkt.b + ptr,pkt.b + ptr,packetSize - ptr);
+		s20.crypt12(pkt.unsafeData + ptr,pkt.unsafeData + ptr,packetSize - ptr);
 
 		ptr += pkt.rI16(ptr); // skip length field which currently is always zero in v2.0+
 
@@ -654,23 +654,23 @@ bool VL1::_HELLO(void *tPtr,const SharedPtr<Path> &path,SharedPtr<Peer> &peer,Bu
 	int outl = sizeof(Protocol::OK::HELLO);
 	outp.wO(outl,path->address());
 
-	outp.wI(outl,(uint16_t)0); // legacy field, always 0
+	outp.wI16(outl,0); // legacy field, always 0
 
 	if (p.versionProtocol >= 11) {
-		outp.wI(outl,(uint16_t)myNodeMetaDataBin.size());
+		outp.wI16(outl,(uint16_t)myNodeMetaDataBin.size());
 		outp.wB(outl,myNodeMetaDataBin.data(),(unsigned int)myNodeMetaDataBin.size());
-		outp.wI(outl,(uint16_t)0); // length of additional fields, currently 0
+		outp.wI16(outl,0); // length of additional fields, currently 0
 
 		if ((outl + ZT_HMACSHA384_LEN) > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check, shouldn't be possible
 			return false;
 
 		KBKDFHMACSHA384(key,ZT_PROTO_KDF_KEY_LABEL_HELLO_HMAC,0,1,hmacKey); // iter == 1 for OK
-		HMACSHA384(hmacKey,outp.b + sizeof(ok.h),outl - sizeof(ok.h),outp.b + outl);
+		HMACSHA384(hmacKey,outp.unsafeData + sizeof(ok.h),outl - sizeof(ok.h),outp.unsafeData + outl);
 		outl += ZT_HMACSHA384_LEN;
 	}
 
 	Protocol::armor(outp,outl,peer->key(),peer->cipher());
-	path->send(RR,tPtr,outp.b,outl,now);
+	path->send(RR,tPtr,outp.unsafeData,outl,now);
 
 	return true;
 }
@@ -781,7 +781,7 @@ bool VL1::_WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &p
 
 		int outl = sizeof(Protocol::OK::WHOIS);
 		while ( ((ptr + ZT_ADDRESS_LENGTH) <= packetSize) && ((outl + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX) < ZT_PROTO_MAX_PACKET_LENGTH) ) {
-			const SharedPtr<Peer> &wp(RR->topology->peer(tPtr,Address(pkt.b + ptr)));
+			const SharedPtr<Peer> &wp(RR->topology->peer(tPtr,Address(pkt.unsafeData + ptr)));
 			if (wp) {
 				outp.wO(outl,wp->identity());
 				if (peer->remoteVersionProtocol() >= 11) { // older versions don't know what a locator is
@@ -798,7 +798,7 @@ bool VL1::_WHOIS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &p
 
 		if (outl > sizeof(Protocol::OK::WHOIS)) {
 			Protocol::armor(outp,outl,peer->key(),peer->cipher());
-			path->send(RR,tPtr,outp.b,outl,RR->node->now());
+			path->send(RR,tPtr,outp.unsafeData,outl,RR->node->now());
 		}
 	}
 
@@ -823,28 +823,27 @@ bool VL1::_RENDEZVOUS(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Pee
 					case 4:
 					case 16:
 						if ((sizeof(Protocol::RENDEZVOUS) + rdv.addressLength) <= packetSize) {
-							const InetAddress atAddr(pkt.b + sizeof(Protocol::RENDEZVOUS),rdv.addressLength,port);
-							peer->contact(tPtr,Endpoint(atAddr),now,false,false);
-							RR->t->tryingNewPath(tPtr,0x55a19aaa,with->identity(),atAddr,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->address(),peer->identity().hash(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
+							const InetAddress atAddr(pkt.unsafeData + sizeof(Protocol::RENDEZVOUS),rdv.addressLength,port);
+							peer->contact(tPtr,Endpoint(atAddr),now,false);
+							RR->t->tryingNewPath(tPtr,0x55a19aaa,with->identity(),atAddr,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->address(),peer->identity().hash().data(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
 						}
 						break;
-					case 255:
-						if ((sizeof(Protocol::RENDEZVOUS) + 1) <= packetSize) {
-							Endpoint ep;
-							int epl = ep.unmarshal(pkt.b + sizeof(Protocol::RENDEZVOUS),packetSize - (int)sizeof(Protocol::RENDEZVOUS));
-							if ((epl > 0) && (ep)) {
-								switch (ep.type()) {
-									case Endpoint::INETADDR_V4:
-									case Endpoint::INETADDR_V6:
-										peer->contact(tPtr,ep,now,false,false);
-										RR->t->tryingNewPath(tPtr,0x55a19aab,with->identity(),ep.inetAddr(),path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->address(),peer->identity().hash(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
-										break;
-									default:
-										break;
-								}
+					case 255: {
+						Endpoint ep;
+						int p = sizeof(Protocol::RENDEZVOUS);
+						int epl = pkt.rO(p,ep);
+						if ((epl > 0) && (ep) && (!Buf::readOverflow(p,packetSize))) {
+							switch (ep.type()) {
+								case Endpoint::TYPE_INETADDR_V4:
+								case Endpoint::TYPE_INETADDR_V6:
+									peer->contact(tPtr,ep,now,false);
+									RR->t->tryingNewPath(tPtr,0x55a19aab,with->identity(),ep.inetAddr(),path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->address(),peer->identity().hash().data(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
+									break;
+								default:
+									break;
 							}
 						}
-						break;
+					} break;
 				}
 			}
 		}
@@ -872,7 +871,7 @@ bool VL1::_ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &pe
 		outh.h.inReVerb = Protocol::VERB_ECHO;
 		outh.h.inRePacketId = packetId;
 		int outl = sizeof(Protocol::OK::ECHO);
-		outp.wB(outl,pkt.b + sizeof(Protocol::Header),packetSize - sizeof(Protocol::Header));
+		outp.wB(outl,pkt.unsafeData + sizeof(Protocol::Header),packetSize - sizeof(Protocol::Header));
 
 		if (Buf::writeOverflow(outl)) {
 			RR->t->incomingPacketDropped(tPtr,0x14d70bb0,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
@@ -880,7 +879,7 @@ bool VL1::_ECHO(void *tPtr,const SharedPtr<Path> &path,const SharedPtr<Peer> &pe
 		}
 
 		Protocol::armor(outp,outl,peer->key(),peer->cipher());
-		path->send(RR,tPtr,outp.b,outl,now);
+		path->send(RR,tPtr,outp.unsafeData,outl,now);
 	} else {
 		RR->t->incomingPacketDropped(tPtr,0x27878bc1,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
 	}
@@ -908,13 +907,21 @@ bool VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const Shared
 	Endpoint ep;
 	for(unsigned int pi=0;pi<numPaths;++pi) {
 		/*const uint8_t flags = pkt.rI8(ptr);*/ ++ptr; // flags are not presently used
-		ptr += pkt.rI16(ptr); // extended attributes size, currently always 0
+
+		const int xas = (int)pkt.rI16(ptr);
+		//const uint8_t *const extendedAttrs = pkt.rBnc(ptr,xas);
+		ptr += xas;
+
 		const unsigned int addrType = pkt.rI8(ptr);
 		const unsigned int addrRecordLen = pkt.rI8(ptr);
 		if (addrRecordLen == 0) {
 			RR->t->incomingPacketDropped(tPtr,0xaed00118,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
 			return false;
 		}
+		if (Buf::readOverflow(ptr,packetSize)) {
+			RR->t->incomingPacketDropped(tPtr,0xb450e10f,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+			return false;
+		}
 
 		const void *addrBytes = nullptr;
 		unsigned int addrLen = 0;
@@ -934,11 +941,14 @@ bool VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const Shared
 				addrLen = 16;
 				addrPort = pkt.rI16(ptr);
 				break;
+			//case 200:
+				// TODO: this would be a WebRTC SDP offer contained in the extended attrs field
+				//break;
 			default: break;
 		}
 
 		if (Buf::readOverflow(ptr,packetSize)) {
-			RR->t->incomingPacketDropped(tPtr,0xbad0f10f,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
+			RR->t->incomingPacketDropped(tPtr,0xb4d0f10f,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
 			return false;
 		}
 
@@ -951,8 +961,8 @@ bool VL1::_PUSH_DIRECT_PATHS(void *tPtr,const SharedPtr<Path> &path,const Shared
 			}
 
 			switch(ep.type()) {
-				case Endpoint::INETADDR_V4:
-				case Endpoint::INETADDR_V6:
+				case Endpoint::TYPE_INETADDR_V4:
+				case Endpoint::TYPE_INETADDR_V6:
 					a = ep.inetAddr();
 					break;
 				default: // other types are not supported yet