
A bunch of wiring up of stuff...

Adam Ierymenko, 5 years ago (parent commit 3ff9ffd5d4)

+ 1 - 1
Makefile

@@ -14,7 +14,7 @@ debug:
 	mkdir -p ${BUILDDIR} && cd ${BUILDDIR} && cmake .. -DCMAKE_BUILD_TYPE=Debug && $(MAKE)
 
 clean:
-	rm -rf ${BUILDDIR} cmake-build-*
+	rm -rf ${BUILDDIR}
 
 distclean:
 	rm -rf ${BUILDDIR}

+ 178 - 0
attic/TinyVector.hpp

@@ -0,0 +1,178 @@
+/*
+ * Copyright (c)2019 ZeroTier, Inc.
+ *
+ * Use of this software is governed by the Business Source License included
+ * in the LICENSE.TXT file in the project's root directory.
+ *
+ * Change Date: 2023-01-01
+ *
+ * On the date above, in accordance with the Business Source License, use
+ * of this software will be governed by version 2.0 of the Apache License.
+ */
+/****/
+
+#ifndef ZT_TINYVECTOR_HPP
+#define ZT_TINYVECTOR_HPP
+
+#include "Constants.hpp"
+#include "Utils.hpp"
+
+#include <cstdlib>
+#include <cstring>
+#include <new>
+#include <utility>
+#include <stdexcept>
+#include <algorithm>
+
+namespace ZeroTier {
+
+/**
+ * Tiny vector with a static base capacity for allocation-free operation at small sizes
+ *
+ * This doesn't support all of std::vector, uses low-level memcpy to relocate things, and
+ * lacks bounds checking. It's only intended for uses where a minimal subset of the vector
+ * container is needed, the objects are primitive or safe to handle in this way, and the
+ * number of items is typically less than or equal to some statically definable value.
+ *
+ * Examples of safe objects for this include primitive types, Str, SharedPtr, InetAddress,
+ * Address, MAC, etc.
+ *
+ * @tparam T Type to encapsulate
+ * @tparam BASE Base number of items to allocate storage inside the object itself (default: 4)
+ */
+template<typename T,unsigned long BASE = 4>
+class TinyVector
+{
+public:
+	typedef unsigned long size_t;
+	typedef T * iterator;
+	typedef const T * const_iterator;
+	typedef T & reference;
+	typedef const T & const_reference;
+
+	ZT_ALWAYS_INLINE TinyVector() :
+		_v((void *)_baseMem),
+		_c(BASE),
+		_l(0)
+	{
+	}
+
+	ZT_ALWAYS_INLINE TinyVector(const TinyVector &vec) :
+		_v((void *)_baseMem),
+		_c(BASE),
+		_l(0)
+	{
+		*this = vec;
+	}
+
+	ZT_ALWAYS_INLINE ~TinyVector()
+	{
+		clear();
+		if (_v != (void *)_baseMem)
+			free(_v);
+	}
+
+	ZT_ALWAYS_INLINE TinyVector &operator=(const TinyVector &vec)
+	{
+		unsigned long i = 0;
+		if (_l < vec._l) {
+			while (i < _l) {
+				reinterpret_cast<T *>(_v)[i] = reinterpret_cast<const T *>(vec._v)[i];
+				++i;
+			}
+
+			if (vec._l > _c) {
+				unsigned long nc = vec._c;
+				void *nv;
+				if (_v == (void *)_baseMem) {
+					nv = malloc(sizeof(T) * nc);
+					memcpy(nv,_v,sizeof(T) * _l);
+				} else {
+					nv = realloc(_v,sizeof(T) * nc);
+					if (!nv)
+						throw std::bad_alloc();
+				}
+				_v = nv;
+				_c = nc;
+			}
+
+			while (i < vec._l) {
+				new (reinterpret_cast<T *>(_v) + i) T(reinterpret_cast<const T *>(vec._v)[i]);
+				++i;
+			}
+		} else {
+			while (i < vec._l) {
+				reinterpret_cast<T *>(_v)[i] = reinterpret_cast<const T *>(vec._v)[i];
+				++i;
+			}
+			if (!Utils::isPrimitiveType<T>()) {
+				while (i < _l)
+					reinterpret_cast<T *>(_v)[i++].~T();
+			}
+		}
+		_l = vec._l;
+		return *this;
+	}
+
+	ZT_ALWAYS_INLINE void clear()
+	{
+		if (!Utils::isPrimitiveType<T>()) {
+			for (unsigned long i = 0; i < _l; ++i)
+				reinterpret_cast<T *>(_v)[i].~T();
+		}
+		_l = 0;
+	}
+
+	ZT_ALWAYS_INLINE void push_back(const T &v)
+	{
+		if (_l >= _c) {
+			unsigned long nc = _c << 1U;
+			void *nv;
+			if (_v == (void *)_baseMem) {
+				nv = malloc(sizeof(T) * nc);
+				memcpy(nv,_v,sizeof(T) * _l);
+			} else {
+				nv = realloc(_v,sizeof(T) * nc);
+				if (!nv)
+					throw std::bad_alloc();
+			}
+			_v = nv;
+			_c = nc;
+		}
+		new (reinterpret_cast<T *>(_v) + _l++) T(v);
+	}
+
+	ZT_ALWAYS_INLINE void pop_back()
+	{
+		--_l;
+		if (!Utils::isPrimitiveType<T>())
+			reinterpret_cast<T *>(_v)[_l].~T();
+	}
+
+	ZT_ALWAYS_INLINE reference front() { return reinterpret_cast<T *>(_v)[0]; }
+	ZT_ALWAYS_INLINE const_reference front() const { return reinterpret_cast<T *>(_v)[0]; }
+	ZT_ALWAYS_INLINE reference back() { return reinterpret_cast<T *>(_v)[_l - 1]; }
+	ZT_ALWAYS_INLINE const_reference back() const { return reinterpret_cast<T *>(_v)[_l - 1]; }
+
+	ZT_ALWAYS_INLINE unsigned long size() const { return _l; }
+	ZT_ALWAYS_INLINE bool empty() const { return (_l == 0); }
+
+	ZT_ALWAYS_INLINE iterator begin() { return reinterpret_cast<T *>(_v); }
+	ZT_ALWAYS_INLINE iterator end() { return (reinterpret_cast<T *>(_v) + _l); }
+	ZT_ALWAYS_INLINE const_iterator begin() const { return reinterpret_cast<T *>(_v); }
+	ZT_ALWAYS_INLINE const_iterator end() const { return (reinterpret_cast<T *>(_v) + _l); }
+
+	ZT_ALWAYS_INLINE T *data() { return reinterpret_cast<T *>(_v); }
+	ZT_ALWAYS_INLINE const T *data() const { return reinterpret_cast<T *>(_v); }
+
+	ZT_ALWAYS_INLINE reference operator[](const unsigned long i) { return reinterpret_cast<T *>(_v)[i]; }
+	ZT_ALWAYS_INLINE const_reference operator[](const unsigned long i) const { return reinterpret_cast<T *>(_v)[i]; }
+	ZT_ALWAYS_INLINE reference at(const unsigned long i) { return reinterpret_cast<T *>(_v)[i]; }
+	ZT_ALWAYS_INLINE const_reference at(const unsigned long i) const { return reinterpret_cast<T *>(_v)[i]; }
+
+private:
+	uint8_t _baseMem[BASE * sizeof(T)];
+	void *_v;
+	unsigned long _c;
+	unsigned long _l;
+};
+
+} // namespace ZeroTier
+
+#endif

+ 37 - 39
go/cmd/zerotier/cli/help.go

@@ -30,47 +30,45 @@ func Help() {
 Usage: zerotier [-options] <command> [command args]
 
 Global Options:
-  -j                                     Output raw JSON where applicable
-  -p <path>                              Use alternate base path
-  -t <path>                              Use secret auth token from this file
+  -j                                   Output raw JSON where applicable
+  -p <path>                            Use alternate base path
+  -t <path>                            Use secret auth token from this file
 
 
 Commands:
-  help                                   Show this help
-  version                                Print version
-  selftest                               Run internal tests
-  service [mode]                         Start as service (default mode: node)
-    node                                 Start in normal node mode (default)
-    root [options]                       Start in root server mode (see docs)
-  status                                 Show ZeroTier status and config
-  peers                                  Show VL1 peers
-  roots                                  Show configured VL1 root servers
-  addroot <url|identity> [ip/port] [...] Add VL1 root server
-  removeroot <identity|address>          Remove VL1 root server
-  identity <command> [args]              Identity management commands
-    new [c25519|p384]                    Create identity (including secret)
-    getpublic <identity>                 Extract only public part of identity
-    validate <identity>                  Locally validate an identity
-    sign <identity> <file>               Sign a file with an identity's key
-    verify <identity> <file> <sig>       Verify a signature
-  networks                               List joined VL2 virtual networks
-  network <network ID>                   Show verbose network info
-  join <network ID>                      Join a virtual network
-  leave <network ID>                     Leave a virtual network
-  set <network ID> <option> <value>      Set a network local config option
-    manageips <boolean>                  Is IP management allowed?
-    manageroutes <boolean>               Is route management allowed?
-    globalips <boolean>                  Allow assignment of global IPs?
-    globalroutes <boolean>               Can global IP space routes be set?
-    defaultroute <boolean>               Can default route be overridden?
-  set <local config option> <value>      Set a local configuration option
-    phy <IP/bits> blacklist <boolean>    Set or clear blacklist for CIDR
-    phy <IP/bits> trust <path ID/0>      Set or clear trusted path ID for CIDR
-    virt <address> try <IP/port> [...]   Set explicit IPs for reaching a peer
-    port <port>                          Set primary local port for VL1 P2P
-    secondaryport <port/0>               Set or disable secondary VL1 P2P port
-    tertiaryport <port/0>                Set or disable tertiary VL1 P2P port
-    portsearch <boolean>                 Set or disable port search on startup
-    portmapping <boolean>                Set or disable use of uPnP/NAT-PMP
+  help                                 Show this help
+  version                              Print version
+  selftest                             Run internal tests
+  service                              Start as service
+  status                               Show ZeroTier status and config
+  peers                                Show VL1 peers
+  roots                                Show configured VL1 root servers
+  addroot <identity> [IP/port]         Add VL1 root
+  removeroot <identity|address>        Remove VL1 root server
+  identity <command> [args]            Identity management commands
+    new [c25519|p384]                  Create identity (including secret)
+    getpublic <identity>               Extract only public part of identity
+    validate <identity>                Locally validate an identity
+    sign <identity> <file>             Sign a file with an identity's key
+    verify <identity> <file> <sig>     Verify a signature
+  networks                             List joined VL2 virtual networks
+  network <network ID>                 Show verbose network info
+  join <network ID>                    Join a virtual network
+  leave <network ID>                   Leave a virtual network
+  set <network ID> <option> <value>    Set a network local config option
+    manageips <boolean>                Is IP management allowed?
+    manageroutes <boolean>             Is route management allowed?
+    globalips <boolean>                Allow assignment of global IPs?
+    globalroutes <boolean>             Can global IP space routes be set?
+    defaultroute <boolean>             Can default route be overridden?
+  set <local config option> <value>    Set a local configuration option
+    phy <IP/bits> blacklist <boolean>  Set or clear blacklist for CIDR
+    phy <IP/bits> trust <path ID/0>    Set or clear trusted path ID for CIDR
+    virt <address> try <IP/port> [...] Set explicit IPs for reaching a peer
+    port <port>                        Set primary local port for VL1 P2P
+    secondaryport <port/0>             Set or disable secondary VL1 P2P port
+    tertiaryport <port/0>              Set or disable tertiary VL1 P2P port
+    portsearch <boolean>               Set or disable port search on startup
+    portmapping <boolean>              Set or disable use of uPnP/NAT-PMP
 
 
 Most commands require a secret token to permit control of a running ZeroTier
 service. The CLI will automatically try to read this token from the

+ 78 - 48
go/pkg/zerotier/endpoint.go

@@ -16,8 +16,14 @@ const (
 	EndpointTypeUnrecognized = 255
 	EndpointTypeUnrecognized = 255
 )
 )
 
 
+// Endpoint wraps a variety of different ways of describing a node's physical network location.
 type Endpoint struct {
 type Endpoint struct {
+	// Type is this endpoint's type
 	Type          int
 	Type          int
+
+	// Location is the X, Y, Z coordinate of this endpoint or 0,0,0 if unspecified.
+	Location      [3]int
+
 	value, value2 interface{}
 	value, value2 interface{}
 }
 }
 
 
@@ -26,90 +32,114 @@ var (
 )
 )
 
 
 func (ep *Endpoint) unmarshalZT(b []byte) (int, error) {
 func (ep *Endpoint) unmarshalZT(b []byte) (int, error) {
-	if len(b) == 0 {
+	if len(b) < 7 {
 		return 0, ErrInvalidEndpoint
 		return 0, ErrInvalidEndpoint
 	}
 	}
-	switch b[0] {
+	ep.Type = int(b[0])
+	ep.Location[0] = int(binary.BigEndian.Uint16(b[1:3]))
+	ep.Location[1] = int(binary.BigEndian.Uint16(b[3:5]))
+	ep.Location[2] = int(binary.BigEndian.Uint16(b[5:7]))
+	ep.value = nil
+	ep.value2 = nil
+	switch ep.Type {
 	case EndpointTypeNil:
 	case EndpointTypeNil:
-		*ep = Endpoint{Type: EndpointTypeNil}
-		return 1, nil
+		return 7, nil
 	case EndpointTypeInetAddr:
 	case EndpointTypeInetAddr:
 		ina := new(InetAddress)
 		ina := new(InetAddress)
-		inlen, err := ina.unmarshalZT(b[1:])
+		inlen, err := ina.unmarshalZT(b[7:])
 		if err != nil {
 		if err != nil {
 			return 0, err
 			return 0, err
 		}
 		}
-		*ep = Endpoint{
-			Type:  EndpointTypeInetAddr,
-			value: ina,
-		}
-		return 1 + inlen, nil
+		ep.value = ina
+		return 7 + inlen, nil
 	case EndpointTypeDnsName:
 	case EndpointTypeDnsName:
-		zeroAt := 1
-		for i := 1; i < len(b); i++ {
+		stringEnd := 0
+		for i := 7; i < len(b); i++ {
 			if b[i] == 0 {
 			if b[i] == 0 {
-				zeroAt = i
+				stringEnd = i + 1
 				break
 				break
 			}
 			}
 		}
 		}
-		if zeroAt == 1 || (1 + zeroAt + 3) > len(b) {
+		if stringEnd == 0 || (stringEnd + 2) > len(b) {
 			return 0, ErrInvalidEndpoint
 			return 0, ErrInvalidEndpoint
 		}
 		}
-		port := binary.BigEndian.Uint16(b[zeroAt+1:zeroAt+3])
-		*ep = Endpoint{
-			Type:   EndpointTypeDnsName,
-			value:  string(b[1:zeroAt]),
-			value2: &port,
-		}
-		return zeroAt + 3, nil
+		ep.value = string(b[7:stringEnd-1])
+		port := binary.BigEndian.Uint16(b[stringEnd:stringEnd+2])
+		ep.value2 = &port
+		return stringEnd + 2, nil
 	case EndpointTypeZeroTier:
 	case EndpointTypeZeroTier:
-		if len(b) != 54 {
+		if len(b) < 60 {
 			return 0, ErrInvalidEndpoint
 			return 0, ErrInvalidEndpoint
 		}
 		}
-		a, err := NewAddressFromBytes(b[1:6])
+		a, err := NewAddressFromBytes(b[7:12])
 		if err != nil {
 		if err != nil {
 			return 0, err
 			return 0, err
 		}
 		}
-		*ep = Endpoint{
-			Type:   EndpointTypeZeroTier,
-			value:  a,
-			value2: append(make([]byte, 0, 48), b[6:54]...),
-		}
-		return 54, nil
+		ep.value = a
+		ep.value2 = append(make([]byte,0,48),b[12:60]...)
+		return 60, nil
 	case EndpointTypeUrl:
 	case EndpointTypeUrl:
-		zeroAt := 1
-		for i := 1; i < len(b); i++ {
+		stringEnd := 0
+		for i := 7; i < len(b); i++ {
 			if b[i] == 0 {
 			if b[i] == 0 {
-				zeroAt = i
+				stringEnd = i + 1
 				break
 				break
 			}
 			}
 		}
 		}
-		if zeroAt == 1 {
+		if stringEnd == 0 {
 			return 0, ErrInvalidEndpoint
 			return 0, ErrInvalidEndpoint
 		}
 		}
-		*ep = Endpoint{
-			Type:  EndpointTypeUrl,
-			value: string(b[1:zeroAt]),
-		}
-		return zeroAt + 2, nil
+		ep.value = string(b[7:stringEnd-1])
+		return stringEnd, nil
 	case EndpointTypeEthernet:
 	case EndpointTypeEthernet:
-		if len(b) != 7 {
+		if len(b) < 13 {
 			return 0, ErrInvalidEndpoint
 			return 0, ErrInvalidEndpoint
 		}
 		}
-		m, err := NewMACFromBytes(b[1:7])
+		m, err := NewMACFromBytes(b[7:13])
 		if err != nil {
 		if err != nil {
 			return 0, err
 			return 0, err
 		}
 		}
-		*ep = Endpoint{
-			Type: EndpointTypeEthernet,
-			value: m,
-		}
-		return 7, nil
+		ep.value = m
+		return 13, nil
 	default:
 	default:
-		if len(b) < 2 {
+		if len(b) < 8 {
 			return 0, ErrInvalidEndpoint
 			return 0, ErrInvalidEndpoint
 		}
 		}
-		*ep = Endpoint{Type: EndpointTypeUnrecognized}
-		return 1 + int(b[1]), nil
+		ep.Type = EndpointTypeUnrecognized
+		return 8 + int(b[7]), nil
 	}
 	}
 }
 }
+
+// InetAddress gets the address associated with this endpoint or nil if it is not of this type.
+func (ep *Endpoint) InetAddress() *InetAddress {
+	v, _ := ep.value.(*InetAddress)
+	return v
+}
+
+// Address gets the address associated with this endpoint or nil if it is not of this type.
+func (ep *Endpoint) Address() *Address {
+	v, _ := ep.value.(*Address)
+	return v
+}
+
+// DNSName gets the DNS name and port associated with this endpoint or an empty string and -1 if it is not of this type.
+func (ep *Endpoint) DNSName() (string, int) {
+	if ep.Type == EndpointTypeDnsName {
+		return ep.value.(string), int(*(ep.value2.(*uint16)))
+	}
+	return "", -1
+}
+
+// URL gets the URL associated with this endpoint or an empty string if it is not of this type.
+func (ep *Endpoint) URL() string {
+	if ep.Type == EndpointTypeUrl {
+		return ep.value.(string)
+	}
+	return ""
+}
+
+// Ethernet gets the address associated with this endpoint or nil if it is not of this type.
+func (ep *Endpoint) Ethernet() *MAC {
+	v, _ := ep.value.(*MAC)
+	return v
+}
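
For orientation: after this change every marshaled endpoint is assumed to begin with a fixed 7-byte header (one type byte, then three big-endian 16-bit location coordinates), with the type-specific body starting at offset 7. A minimal standalone sketch of that header layout (illustrative only, not part of the commit; the type code value is arbitrary):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Build a 7-byte endpoint header: [type][X][Y][Z], coordinates big-endian.
	hdr := make([]byte, 7)
	hdr[0] = 1                                // endpoint type code (illustrative value only)
	binary.BigEndian.PutUint16(hdr[1:3], 100) // X
	binary.BigEndian.PutUint16(hdr[3:5], 200) // Y
	binary.BigEndian.PutUint16(hdr[5:7], 300) // Z

	// Decode it the same way unmarshalZT above does before switching on the type.
	fmt.Println(int(hdr[0]),
		int(binary.BigEndian.Uint16(hdr[1:3])),
		int(binary.BigEndian.Uint16(hdr[3:5])),
		int(binary.BigEndian.Uint16(hdr[5:7])))
}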

+ 0 - 2
go/pkg/zerotier/errors.go

@@ -29,8 +29,6 @@ const (
 	ErrTapInitFailed            Err = "unable to create native Tap instance"
 	ErrUnrecognizedIdentityType Err = "unrecognized identity type"
 	ErrInvalidKey               Err = "invalid key data"
-	ErrInvalidSignature         Err = "invalid signature"
-	ErrSecretKeyRequired        Err = "secret key required"
 )
 
 // APIErr is returned by the JSON API when a call fails

+ 9 - 8
go/pkg/zerotier/inetaddress.go

@@ -24,20 +24,21 @@ import (
 	"net"
 	"net"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
+	"syscall"
 	"unsafe"
 	"unsafe"
 )
 )
 
 
 func sockaddrStorageToIPNet(ss *C.struct_sockaddr_storage) *net.IPNet {
 func sockaddrStorageToIPNet(ss *C.struct_sockaddr_storage) *net.IPNet {
 	var a net.IPNet
 	var a net.IPNet
 	switch ss.ss_family {
 	switch ss.ss_family {
-	case AFInet:
+	case syscall.AF_INET:
 		sa4 := (*C.struct_sockaddr_in)(unsafe.Pointer(ss))
 		sa4 := (*C.struct_sockaddr_in)(unsafe.Pointer(ss))
 		var ip4 [4]byte
 		var ip4 [4]byte
 		copy(ip4[:], (*[4]byte)(unsafe.Pointer(&sa4.sin_addr))[:])
 		copy(ip4[:], (*[4]byte)(unsafe.Pointer(&sa4.sin_addr))[:])
 		a.IP = ip4[:]
 		a.IP = ip4[:]
 		a.Mask = net.CIDRMask(int(binary.BigEndian.Uint16(((*[2]byte)(unsafe.Pointer(&sa4.sin_port)))[:])), 32)
 		a.Mask = net.CIDRMask(int(binary.BigEndian.Uint16(((*[2]byte)(unsafe.Pointer(&sa4.sin_port)))[:])), 32)
 		return &a
 		return &a
-	case AFInet6:
+	case syscall.AF_INET6:
 		sa6 := (*C.struct_sockaddr_in6)(unsafe.Pointer(ss))
 		sa6 := (*C.struct_sockaddr_in6)(unsafe.Pointer(ss))
 		var ip6 [16]byte
 		var ip6 [16]byte
 		copy(ip6[:], (*[16]byte)(unsafe.Pointer(&sa6.sin6_addr))[:])
 		copy(ip6[:], (*[16]byte)(unsafe.Pointer(&sa6.sin6_addr))[:])
@@ -51,14 +52,14 @@ func sockaddrStorageToIPNet(ss *C.struct_sockaddr_storage) *net.IPNet {
 func sockaddrStorageToUDPAddr(ss *C.struct_sockaddr_storage) *net.UDPAddr {
 func sockaddrStorageToUDPAddr(ss *C.struct_sockaddr_storage) *net.UDPAddr {
 	var a net.UDPAddr
 	var a net.UDPAddr
 	switch ss.ss_family {
 	switch ss.ss_family {
-	case AFInet:
+	case syscall.AF_INET:
 		sa4 := (*C.struct_sockaddr_in)(unsafe.Pointer(ss))
 		sa4 := (*C.struct_sockaddr_in)(unsafe.Pointer(ss))
 		var ip4 [4]byte
 		var ip4 [4]byte
 		copy(ip4[:], (*[4]byte)(unsafe.Pointer(&sa4.sin_addr))[:])
 		copy(ip4[:], (*[4]byte)(unsafe.Pointer(&sa4.sin_addr))[:])
 		a.IP = ip4[:]
 		a.IP = ip4[:]
 		a.Port = int(binary.BigEndian.Uint16(((*[2]byte)(unsafe.Pointer(&sa4.sin_port)))[:]))
 		a.Port = int(binary.BigEndian.Uint16(((*[2]byte)(unsafe.Pointer(&sa4.sin_port)))[:]))
 		return &a
 		return &a
-	case AFInet6:
+	case syscall.AF_INET6:
 		sa6 := (*C.struct_sockaddr_in6)(unsafe.Pointer(ss))
 		sa6 := (*C.struct_sockaddr_in6)(unsafe.Pointer(ss))
 		var ip6 [16]byte
 		var ip6 [16]byte
 		copy(ip6[:], (*[16]byte)(unsafe.Pointer(&sa6.sin6_addr))[:])
 		copy(ip6[:], (*[16]byte)(unsafe.Pointer(&sa6.sin6_addr))[:])
@@ -77,14 +78,14 @@ func makeSockaddrStorage(ip net.IP, port int, ss *C.struct_sockaddr_storage) boo
 	C.memset(unsafe.Pointer(ss), 0, C.sizeof_struct_sockaddr_storage)
 	C.memset(unsafe.Pointer(ss), 0, C.sizeof_struct_sockaddr_storage)
 	if len(ip) == 4 {
 	if len(ip) == 4 {
 		sa4 := (*C.struct_sockaddr_in)(unsafe.Pointer(ss))
 		sa4 := (*C.struct_sockaddr_in)(unsafe.Pointer(ss))
-		sa4.sin_family = AFInet
+		sa4.sin_family = syscall.AF_INET
 		copy(((*[4]byte)(unsafe.Pointer(&sa4.sin_addr)))[:], ip)
 		copy(((*[4]byte)(unsafe.Pointer(&sa4.sin_addr)))[:], ip)
 		binary.BigEndian.PutUint16(((*[2]byte)(unsafe.Pointer(&sa4.sin_port)))[:], uint16(port))
 		binary.BigEndian.PutUint16(((*[2]byte)(unsafe.Pointer(&sa4.sin_port)))[:], uint16(port))
 		return true
 		return true
 	}
 	}
 	if len(ip) == 16 {
 	if len(ip) == 16 {
 		sa6 := (*C.struct_sockaddr_in6)(unsafe.Pointer(ss))
 		sa6 := (*C.struct_sockaddr_in6)(unsafe.Pointer(ss))
-		sa6.sin6_family = AFInet6
+		sa6.sin6_family = syscall.AF_INET6
 		copy(((*[16]byte)(unsafe.Pointer(&sa6.sin6_addr)))[:], ip)
 		copy(((*[16]byte)(unsafe.Pointer(&sa6.sin6_addr)))[:], ip)
 		binary.BigEndian.PutUint16(((*[2]byte)(unsafe.Pointer(&sa6.sin6_port)))[:], uint16(port))
 		binary.BigEndian.PutUint16(((*[2]byte)(unsafe.Pointer(&sa6.sin6_port)))[:], uint16(port))
 		return true
 		return true
@@ -158,9 +159,9 @@ func (ina *InetAddress) String() string {
 func (ina *InetAddress) Family() int {
 func (ina *InetAddress) Family() int {
 	switch len(ina.IP) {
 	switch len(ina.IP) {
 	case 4:
 	case 4:
-		return AFInet
+		return syscall.AF_INET
 	case 16:
 	case 16:
-		return AFInet6
+		return syscall.AF_INET6
 	}
 	}
 	return 0
 	return 0
 }
 }

+ 13 - 6
go/pkg/zerotier/localconfig.go

@@ -16,7 +16,6 @@ package zerotier
 import (
 import (
 	"encoding/json"
 	"encoding/json"
 	"io/ioutil"
 	"io/ioutil"
-	rand "math/rand"
 	"os"
 	"os"
 	"runtime"
 	"runtime"
 )
 )
@@ -92,15 +91,21 @@ type LocalConfig struct {
 	Settings LocalConfigSettings `json:"settings,omitempty"`
 	Settings LocalConfigSettings `json:"settings,omitempty"`
 }
 }
 
 
-// Read this local config from a file, initializing to defaults if the file does not exist
-func (lc *LocalConfig) Read(p string, saveDefaultsIfNotExist bool) error {
+// Read this local config from a file, initializing to defaults if the file does not exist.
+func (lc *LocalConfig) Read(p string, saveDefaultsIfNotExist bool, isTotallyNewNode bool) error {
 	if lc.Physical == nil {
 	if lc.Physical == nil {
 		lc.Physical = make(map[string]LocalConfigPhysicalPathConfiguration)
 		lc.Physical = make(map[string]LocalConfigPhysicalPathConfiguration)
 		lc.Virtual = make(map[Address]LocalConfigVirtualAddressConfiguration)
 		lc.Virtual = make(map[Address]LocalConfigVirtualAddressConfiguration)
 		lc.Network = make(map[NetworkID]NetworkLocalSettings)
 		lc.Network = make(map[NetworkID]NetworkLocalSettings)
-		lc.Settings.PrimaryPort = 9993
-		lc.Settings.SecondaryPort = 16384 + (rand.Int() % 16384)
-		lc.Settings.TertiaryPort = 32768 + (rand.Int() % 16384)
+
+		// LocalConfig default settings
+		if isTotallyNewNode {
+			lc.Settings.PrimaryPort = 893
+		} else {
+			lc.Settings.PrimaryPort = 9993
+		}
+		lc.Settings.SecondaryPort = unassignedPrivilegedPorts[randomUInt() % uint(len(unassignedPrivilegedPorts))]
+		lc.Settings.TertiaryPort = int(32768 + (randomUInt() % 16384))
 		lc.Settings.PortSearch = true
 		lc.Settings.PortSearch = true
 		lc.Settings.PortMapping = true
 		lc.Settings.PortMapping = true
 		lc.Settings.LogSizeMax = 128
 		lc.Settings.LogSizeMax = 128
@@ -108,6 +113,8 @@ func (lc *LocalConfig) Read(p string, saveDefaultsIfNotExist bool) error {
 		switch runtime.GOOS {
 		switch runtime.GOOS {
 		case "windows":
 		case "windows":
 			lc.Settings.InterfacePrefixBlacklist = []string{"loopback"}
 			lc.Settings.InterfacePrefixBlacklist = []string{"loopback"}
+		case "darwin":
+			lc.Settings.InterfacePrefixBlacklist = []string{"lo","utun","feth"}
 		default:
 		default:
 			lc.Settings.InterfacePrefixBlacklist = []string{"lo"}
 			lc.Settings.InterfacePrefixBlacklist = []string{"lo"}
 		}
 		}

+ 24 - 14
go/pkg/zerotier/locator.go

@@ -12,35 +12,45 @@ var (
 	ErrInvalidLocator = errors.New("invalid marshaled locator object")
 	ErrInvalidLocator = errors.New("invalid marshaled locator object")
 )
 )
 
 
+// Timestamp returns this locator's timestamp in milliseconds since epoch.
 func (l Locator) Timestamp() int64 {
 func (l Locator) Timestamp() int64 {
 	if len(l) >= 8 {
 	if len(l) >= 8 {
-		return int64(binary.BigEndian.Uint64(l))
+		return int64(binary.BigEndian.Uint64(l[0:8]))
 	}
 	}
 	return 0
 	return 0
 }
 }
 
 
+// Nil returns true if this is a nil/empty locator.
+func (l Locator) Nil() bool {
+	return len(l) < 8 || int64(binary.BigEndian.Uint64(l[0:8])) <= 0
+}
+
 // Endpoints obtains the endpoints described by this locator.
 // Endpoints obtains the endpoints described by this locator.
 func (l Locator) Endpoints() (eps []Endpoint,err error) {
 func (l Locator) Endpoints() (eps []Endpoint,err error) {
-	if len(l) <= (8 + 2) {
+	if len(l) < 8 {
 		err = ErrInvalidLocator
 		err = ErrInvalidLocator
 		return
 		return
 	}
 	}
-
-	endpointCount := int(binary.BigEndian.Uint16(l[8:10]))
-	eps = make([]Endpoint,endpointCount)
-	p := 10
-	for e:=0;e<endpointCount;e++ {
-		if p >= len(l) {
+	if int64(binary.BigEndian.Uint64(l[0:8])) > 0 {
+		if len(l) < 10 {
 			err = ErrInvalidLocator
 			err = ErrInvalidLocator
 			return
 			return
 		}
 		}
-		var elen int
-		elen, err = eps[e].unmarshalZT(l[p:])
-		if err != nil {
-			return
+		endpointCount := int(binary.BigEndian.Uint16(l[8:10]))
+		eps = make([]Endpoint, endpointCount)
+		p := 10
+		for e := 0; e < endpointCount; e++ {
+			if p >= len(l) {
+				err = ErrInvalidLocator
+				return
+			}
+			var elen int
+			elen, err = eps[e].unmarshalZT(l[p:])
+			if err != nil {
+				return
+			}
+			p += elen
 		}
 		}
-		p += elen
 	}
 	}
-
 	return
 	return
 }
 }
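
The new Nil helper and the reworked Endpoints are meant to be used together when inspecting a root's locator. A brief usage sketch, assuming it lives in this package (locatorAddresses is illustrative, not commit code):

// Sketch only: collect the IP/port endpoints advertised by a locator.
func locatorAddresses(loc Locator) []*InetAddress {
	if loc.Nil() {
		return nil // no timestamp, nothing advertised
	}
	eps, err := loc.Endpoints()
	if err != nil {
		return nil // malformed locator
	}
	var addrs []*InetAddress
	for i := range eps {
		if ia := eps[i].InetAddress(); ia != nil {
			addrs = append(addrs, ia)
		}
	}
	return addrs
}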

+ 60 - 2
go/pkg/zerotier/misc.go

@@ -16,17 +16,75 @@ package zerotier
 import (
 import (
 	"encoding/base32"
 	"encoding/base32"
 	"encoding/binary"
 	"encoding/binary"
+	"math/rand"
 	"net"
 	"net"
+	"sync"
 	"time"
 	"time"
 	"unsafe"
 	"unsafe"
 )
 )
 
 
-// ZeroTierLogoChar is the unicode character that is ZeroTier's logo
-const ZeroTierLogoChar = "⏁"
+// LogoChar is the unicode character that is ZeroTier's logo
+const LogoChar = "⏁"
 
 
 // Base32StdLowerCase is a base32 encoder/decoder using a lower-case standard alphabet and no padding.
 // Base32StdLowerCase is a base32 encoder/decoder using a lower-case standard alphabet and no padding.
 var Base32StdLowerCase = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567").WithPadding(base32.NoPadding)
 var Base32StdLowerCase = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567").WithPadding(base32.NoPadding)
 
 
+var unassignedPrivilegedPorts = []int{
+	4,
+	6,
+	8,
+	10,
+	12,
+	14,
+	15,
+	16,
+	26,
+	28,
+	30,
+	32,
+	34,
+	36,
+	40,
+	60,
+	269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+	285,
+	288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+	323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+	334, 335, 336, 337, 338, 339, 340, 341, 342, 343,
+	703,
+	708,
+	713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728,
+	732, 733, 734, 735, 736, 737, 738, 739, 740,
+	743,
+	745, 746,
+	755, 756,
+	766,
+	768,
+	778, 779,
+	781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799,
+	802, 803, 804, 805, 806, 807, 808, 809,
+	811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827,
+	834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
+	849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859,
+	862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872,
+	874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885,
+	889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899,
+	904, 905, 906, 907, 908, 909, 910, 911,
+	914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988,
+	1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009,
+	1023,
+}
+
+var prng = rand.NewSource(time.Now().UnixNano())
+var prngLock sync.Mutex
+
+func randomUInt() uint {
+	prngLock.Lock()
+	i := prng.Int63()
+	prngLock.Unlock()
+	return uint(i)
+}
+
 // TimeMs returns the time in milliseconds since epoch.
 // TimeMs returns the time in milliseconds since epoch.
 func TimeMs() int64 { return int64(time.Now().UnixNano()) / int64(1000000) }
 func TimeMs() int64 { return int64(time.Now().UnixNano()) / int64(1000000) }
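
The unassignedPrivilegedPorts table and randomUInt helper above exist so that LocalConfig.Read and NewNode can pick default and fallback ports. A condensed sketch of that selection, assuming it sits alongside these helpers (pickDefaultPorts itself is hypothetical, not commit code):

// Sketch only: how default ports are derived from the helpers above.
func pickDefaultPorts(isTotallyNewNode bool) (primary, secondary, tertiary int) {
	if isTotallyNewNode {
		primary = 893 // new V2 default, a privileged port (see ZT_DEFAULT_PORT)
	} else {
		primary = 9993 // keep the old default for nodes that already have an identity
	}
	secondary = unassignedPrivilegedPorts[randomUInt()%uint(len(unassignedPrivilegedPorts))]
	tertiary = int(32768 + (randomUInt() % 16384))
	return
}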
 
 

+ 15 - 14
go/pkg/zerotier/nativetap.go

@@ -24,6 +24,7 @@ import (
 	"net"
 	"net"
 	"sync"
 	"sync"
 	"sync/atomic"
 	"sync/atomic"
+	"syscall"
 	"unsafe"
 	"unsafe"
 )
 )
 
 
@@ -70,12 +71,12 @@ func (t *nativeTap) AddIP(ip *net.IPNet) error {
 		if bits > 128 || bits < 0 {
 		if bits > 128 || bits < 0 {
 			return ErrInvalidParameter
 			return ErrInvalidParameter
 		}
 		}
-		C.ZT_GoTap_addIp(t.tap, C.int(AFInet6), unsafe.Pointer(&ip.IP[0]), C.int(bits))
+		C.ZT_GoTap_addIp(t.tap, C.int(syscall.AF_INET6), unsafe.Pointer(&ip.IP[0]), C.int(bits))
 	} else if len(ip.IP) == 4 {
 	} else if len(ip.IP) == 4 {
 		if bits > 32 || bits < 0 {
 		if bits > 32 || bits < 0 {
 			return ErrInvalidParameter
 			return ErrInvalidParameter
 		}
 		}
-		C.ZT_GoTap_addIp(t.tap, C.int(AFInet), unsafe.Pointer(&ip.IP[0]), C.int(bits))
+		C.ZT_GoTap_addIp(t.tap, C.int(syscall.AF_INET), unsafe.Pointer(&ip.IP[0]), C.int(bits))
 	}
 	}
 	return ErrInvalidParameter
 	return ErrInvalidParameter
 }
 }
@@ -87,14 +88,14 @@ func (t *nativeTap) RemoveIP(ip *net.IPNet) error {
 		if bits > 128 || bits < 0 {
 		if bits > 128 || bits < 0 {
 			return ErrInvalidParameter
 			return ErrInvalidParameter
 		}
 		}
-		C.ZT_GoTap_removeIp(t.tap, C.int(AFInet6), unsafe.Pointer(&ip.IP[0]), C.int(bits))
+		C.ZT_GoTap_removeIp(t.tap, C.int(syscall.AF_INET6), unsafe.Pointer(&ip.IP[0]), C.int(bits))
 		return nil
 		return nil
 	}
 	}
 	if len(ip.IP) == 4 {
 	if len(ip.IP) == 4 {
 		if bits > 32 || bits < 0 {
 		if bits > 32 || bits < 0 {
 			return ErrInvalidParameter
 			return ErrInvalidParameter
 		}
 		}
-		C.ZT_GoTap_removeIp(t.tap, C.int(AFInet), unsafe.Pointer(&ip.IP[0]), C.int(bits))
+		C.ZT_GoTap_removeIp(t.tap, C.int(syscall.AF_INET), unsafe.Pointer(&ip.IP[0]), C.int(bits))
 		return nil
 		return nil
 	}
 	}
 	return ErrInvalidParameter
 	return ErrInvalidParameter
@@ -115,7 +116,7 @@ func (t *nativeTap) IPs() (ips []net.IPNet, err error) {
 		af := int(ipbuf[ipptr])
 		af := int(ipbuf[ipptr])
 		ipptr++
 		ipptr++
 		switch af {
 		switch af {
-		case AFInet:
+		case syscall.AF_INET:
 			var ip [4]byte
 			var ip [4]byte
 			for j := 0; j < 4; j++ {
 			for j := 0; j < 4; j++ {
 				ip[j] = ipbuf[ipptr]
 				ip[j] = ipbuf[ipptr]
@@ -124,7 +125,7 @@ func (t *nativeTap) IPs() (ips []net.IPNet, err error) {
 			bits := ipbuf[ipptr]
 			bits := ipbuf[ipptr]
 			ipptr++
 			ipptr++
 			ips = append(ips, net.IPNet{IP: net.IP(ip[:]), Mask: net.CIDRMask(int(bits), 32)})
 			ips = append(ips, net.IPNet{IP: net.IP(ip[:]), Mask: net.CIDRMask(int(bits), 32)})
-		case AFInet6:
+		case syscall.AF_INET6:
 			var ip [16]byte
 			var ip [16]byte
 			for j := 0; j < 16; j++ {
 			for j := 0; j < 16; j++ {
 				ip[j] = ipbuf[ipptr]
 				ip[j] = ipbuf[ipptr]
@@ -168,16 +169,16 @@ func (t *nativeTap) AddRoute(r *Route) error {
 		if len(r.Target.IP) == 4 {
 		if len(r.Target.IP) == 4 {
 			mask, _ := r.Target.Mask.Size()
 			mask, _ := r.Target.Mask.Size()
 			if len(via) == 4 {
 			if len(via) == 4 {
-				rc = int(C.ZT_GoTap_addRoute(t.tap, AFInet, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), AFInet, unsafe.Pointer(&via[0]), C.uint(r.Metric)))
+				rc = int(C.ZT_GoTap_addRoute(t.tap, syscall.AF_INET, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), syscall.AF_INET, unsafe.Pointer(&via[0]), C.uint(r.Metric)))
 			} else {
 			} else {
-				rc = int(C.ZT_GoTap_addRoute(t.tap, AFInet, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), 0, nil, C.uint(r.Metric)))
+				rc = int(C.ZT_GoTap_addRoute(t.tap, syscall.AF_INET, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), 0, nil, C.uint(r.Metric)))
 			}
 			}
 		} else if len(r.Target.IP) == 16 {
 		} else if len(r.Target.IP) == 16 {
 			mask, _ := r.Target.Mask.Size()
 			mask, _ := r.Target.Mask.Size()
 			if len(via) == 16 {
 			if len(via) == 16 {
-				rc = int(C.ZT_GoTap_addRoute(t.tap, AFInet6, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), AFInet6, unsafe.Pointer(&via[0]), C.uint(r.Metric)))
+				rc = int(C.ZT_GoTap_addRoute(t.tap, syscall.AF_INET6, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), syscall.AF_INET6, unsafe.Pointer(&via[0]), C.uint(r.Metric)))
 			} else {
 			} else {
-				rc = int(C.ZT_GoTap_addRoute(t.tap, AFInet6, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), 0, nil, C.uint(r.Metric)))
+				rc = int(C.ZT_GoTap_addRoute(t.tap, syscall.AF_INET6, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), 0, nil, C.uint(r.Metric)))
 			}
 			}
 		}
 		}
 	}
 	}
@@ -198,16 +199,16 @@ func (t *nativeTap) RemoveRoute(r *Route) error {
 		if len(r.Target.IP) == 4 {
 		if len(r.Target.IP) == 4 {
 			mask, _ := r.Target.Mask.Size()
 			mask, _ := r.Target.Mask.Size()
 			if len(via) == 4 {
 			if len(via) == 4 {
-				rc = int(C.ZT_GoTap_removeRoute(t.tap, AFInet, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), AFInet, unsafe.Pointer(&(via[0])), C.uint(r.Metric)))
+				rc = int(C.ZT_GoTap_removeRoute(t.tap, syscall.AF_INET, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), syscall.AF_INET, unsafe.Pointer(&(via[0])), C.uint(r.Metric)))
 			} else {
 			} else {
-				rc = int(C.ZT_GoTap_removeRoute(t.tap, AFInet, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), 0, nil, C.uint(r.Metric)))
+				rc = int(C.ZT_GoTap_removeRoute(t.tap, syscall.AF_INET, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), 0, nil, C.uint(r.Metric)))
 			}
 			}
 		} else if len(r.Target.IP) == 16 {
 		} else if len(r.Target.IP) == 16 {
 			mask, _ := r.Target.Mask.Size()
 			mask, _ := r.Target.Mask.Size()
 			if len(via) == 16 {
 			if len(via) == 16 {
-				rc = int(C.ZT_GoTap_removeRoute(t.tap, AFInet6, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), AFInet6, unsafe.Pointer(&via[0]), C.uint(r.Metric)))
+				rc = int(C.ZT_GoTap_removeRoute(t.tap, syscall.AF_INET6, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), syscall.AF_INET6, unsafe.Pointer(&via[0]), C.uint(r.Metric)))
 			} else {
 			} else {
-				rc = int(C.ZT_GoTap_removeRoute(t.tap, AFInet6, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), 0, nil, C.uint(r.Metric)))
+				rc = int(C.ZT_GoTap_removeRoute(t.tap, syscall.AF_INET6, unsafe.Pointer(&r.Target.IP[0]), C.int(mask), 0, nil, C.uint(r.Metric)))
 			}
 			}
 		}
 		}
 	}
 	}

+ 39 - 29
go/pkg/zerotier/node.go

@@ -35,6 +35,7 @@ import (
 	"strings"
 	"strings"
 	"sync"
 	"sync"
 	"sync/atomic"
 	"sync/atomic"
+	"syscall"
 	"time"
 	"time"
 	"unsafe"
 	"unsafe"
 
 
@@ -65,12 +66,6 @@ const (
 	// CoreVersionBuild is the build version of the ZeroTier core
 	// CoreVersionBuild is the build version of the ZeroTier core
 	CoreVersionBuild int = C.ZEROTIER_ONE_VERSION_BUILD
 	CoreVersionBuild int = C.ZEROTIER_ONE_VERSION_BUILD
 
 
-	// AFInet is the address family for IPv4
-	AFInet = C.AF_INET
-
-	// AFInet6 is the address family for IPv6
-	AFInet6 = C.AF_INET6
-
 	networkConfigOpUp     int = C.ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP
 	networkConfigOpUp     int = C.ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP
 	networkConfigOpUpdate int = C.ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE
 	networkConfigOpUpdate int = C.ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE
 
 
@@ -136,15 +131,16 @@ func NewNode(basePath string) (n *Node, err error) {
 	}
 	}
 	n.localConfigPath = path.Join(basePath, "local.conf")
 	n.localConfigPath = path.Join(basePath, "local.conf")
 
 
-	err = n.localConfig.Read(n.localConfigPath, true)
+	_, identitySecretNotFoundErr := os.Stat(path.Join(basePath,"identity.secret"))
+	err = n.localConfig.Read(n.localConfigPath, true, identitySecretNotFoundErr != nil)
 	if err != nil {
 	if err != nil {
-		return nil, err
+		return
 	}
 	}
 
 
 	if n.localConfig.Settings.LogSizeMax >= 0 {
 	if n.localConfig.Settings.LogSizeMax >= 0 {
-		n.logW, err = sizeLimitWriterOpen(path.Join(basePath, "service.log"))
+		n.logW, err = sizeLimitWriterOpen(path.Join(basePath, "node.log"))
 		if err != nil {
 		if err != nil {
-			return nil, err
+			return
 		}
 		}
 		n.log = log.New(n.logW, "", log.LstdFlags)
 		n.log = log.New(n.logW, "", log.LstdFlags)
 	} else {
 	} else {
@@ -155,50 +151,62 @@ func NewNode(basePath string) (n *Node, err error) {
 		portsChanged := false
 		portsChanged := false
 
 
 		portCheckCount := 0
 		portCheckCount := 0
-		for portCheckCount < 2048 {
+		origPort := n.localConfig.Settings.PrimaryPort
+		for portCheckCount < 256 {
 			portCheckCount++
 			portCheckCount++
 			if checkPort(n.localConfig.Settings.PrimaryPort) {
 			if checkPort(n.localConfig.Settings.PrimaryPort) {
+				if n.localConfig.Settings.PrimaryPort != origPort {
+					n.log.Printf("primary port %d unavailable, found port %d (port search enabled)", origPort, n.localConfig.Settings.PrimaryPort)
+				}
 				break
 				break
 			}
 			}
-			n.log.Printf("primary port %d unavailable, trying next port (port search enabled)", n.localConfig.Settings.PrimaryPort)
-			n.localConfig.Settings.PrimaryPort++
-			n.localConfig.Settings.PrimaryPort &= 0xffff
+			n.localConfig.Settings.PrimaryPort = int(4096 + (randomUInt() % 16384))
 			portsChanged = true
 			portsChanged = true
 		}
 		}
-		if portCheckCount == 2048 {
+		if portCheckCount == 256 {
 			return nil, errors.New("unable to bind to primary port, tried 2048 later ports")
 			return nil, errors.New("unable to bind to primary port, tried 2048 later ports")
 		}
 		}
 
 
 		if n.localConfig.Settings.SecondaryPort > 0 {
 		if n.localConfig.Settings.SecondaryPort > 0 {
 			portCheckCount = 0
 			portCheckCount = 0
-			for portCheckCount < 2048 {
+			origPort = n.localConfig.Settings.SecondaryPort
+			for portCheckCount < 256 {
 				portCheckCount++
 				portCheckCount++
 				if checkPort(n.localConfig.Settings.SecondaryPort) {
 				if checkPort(n.localConfig.Settings.SecondaryPort) {
+					if n.localConfig.Settings.SecondaryPort != origPort {
+						n.log.Printf("secondary port %d unavailable, found port %d (port search enabled)", origPort, n.localConfig.Settings.SecondaryPort)
+					}
 					break
 					break
 				}
 				}
-				n.log.Printf("secondary port %d unavailable, trying next port (port search enabled)", n.localConfig.Settings.SecondaryPort)
-				n.localConfig.Settings.SecondaryPort++
-				n.localConfig.Settings.SecondaryPort &= 0xffff
+				n.log.Printf("secondary port %d unavailable, trying a random port (port search enabled)", n.localConfig.Settings.SecondaryPort)
+				if portCheckCount <= 64 {
+					n.localConfig.Settings.SecondaryPort = unassignedPrivilegedPorts[randomUInt() % uint(len(unassignedPrivilegedPorts))]
+				} else {
+					n.localConfig.Settings.SecondaryPort = int(16384 + (randomUInt() % 16384))
+				}
 				portsChanged = true
 				portsChanged = true
 			}
 			}
-			if portCheckCount == 2048 {
+			if portCheckCount == 256 {
 				n.localConfig.Settings.SecondaryPort = 0
 				n.localConfig.Settings.SecondaryPort = 0
 			}
 			}
 		}
 		}
 
 
 		if n.localConfig.Settings.TertiaryPort > 0 {
 		if n.localConfig.Settings.TertiaryPort > 0 {
 			portCheckCount = 0
 			portCheckCount = 0
-			for portCheckCount < 2048 {
+			origPort = n.localConfig.Settings.TertiaryPort
+			for portCheckCount < 256 {
 				portCheckCount++
 				portCheckCount++
 				if checkPort(n.localConfig.Settings.TertiaryPort) {
 				if checkPort(n.localConfig.Settings.TertiaryPort) {
+					if n.localConfig.Settings.TertiaryPort != origPort {
+						n.log.Printf("tertiary port %d unavailable, found port %d (port search enabled)", origPort, n.localConfig.Settings.TertiaryPort)
+					}
 					break
 					break
 				}
 				}
-				n.log.Printf("tertiary port %d unavailable, trying next port (port search enabled)", n.localConfig.Settings.TertiaryPort)
-				n.localConfig.Settings.TertiaryPort++
-				n.localConfig.Settings.TertiaryPort &= 0xffff
+				n.log.Printf("tertiary port %d unavailable, trying a random port (port search enabled)", n.localConfig.Settings.TertiaryPort)
+				n.localConfig.Settings.TertiaryPort = int(32768 + (randomUInt() % 16384))
 				portsChanged = true
 				portsChanged = true
 			}
 			}
-			if portCheckCount == 2048 {
+			if portCheckCount == 256 {
 				n.localConfig.Settings.TertiaryPort = 0
 				n.localConfig.Settings.TertiaryPort = 0
 			}
 			}
 		}
 		}
@@ -691,9 +699,9 @@ func goPathCheckFunc(gn unsafe.Pointer, ztAddress C.uint64_t, af C.int, ip unsaf
 	node := nodesByUserPtr[uintptr(gn)]
 	node := nodesByUserPtr[uintptr(gn)]
 	nodesByUserPtrLock.RUnlock()
 	nodesByUserPtrLock.RUnlock()
 	var nip net.IP
 	var nip net.IP
-	if af == AFInet {
+	if af == syscall.AF_INET {
 		nip = ((*[4]byte)(ip))[:]
 		nip = ((*[4]byte)(ip))[:]
-	} else if af == AFInet6 {
+	} else if af == syscall.AF_INET6 {
 		nip = ((*[16]byte)(ip))[:]
 		nip = ((*[16]byte)(ip))[:]
 	} else {
 	} else {
 		return 0
 		return 0
@@ -717,16 +725,18 @@ func goPathLookupFunc(gn unsafe.Pointer, ztAddress C.uint64_t, desiredFamily int
 	if err != nil {
 	if err != nil {
 		return 0
 		return 0
 	}
 	}
+
 	ip, port := node.pathLookup(id)
 	ip, port := node.pathLookup(id)
+
 	if len(ip) > 0 && port > 0 && port <= 65535 {
 	if len(ip) > 0 && port > 0 && port <= 65535 {
 		ip4 := ip.To4()
 		ip4 := ip.To4()
 		if len(ip4) == 4 {
 		if len(ip4) == 4 {
-			*((*C.int)(familyP)) = C.int(AFInet)
+			*((*C.int)(familyP)) = C.int(syscall.AF_INET)
 			copy((*[4]byte)(ipP)[:], ip4)
 			copy((*[4]byte)(ipP)[:], ip4)
 			*((*C.int)(portP)) = C.int(port)
 			*((*C.int)(portP)) = C.int(port)
 			return 1
 			return 1
 		} else if len(ip) == 16 {
 		} else if len(ip) == 16 {
-			*((*C.int)(familyP)) = C.int(AFInet6)
+			*((*C.int)(familyP)) = C.int(syscall.AF_INET6)
 			copy((*[16]byte)(ipP)[:], ip)
 			copy((*[16]byte)(ipP)[:], ip)
 			*((*C.int)(portP)) = C.int(port)
 			*((*C.int)(portP)) = C.int(port)
 			return 1
 			return 1
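
The three port-search loops in NewNode above share one pattern: try the configured port, then retry a bounded number of random candidates via checkPort. A factored-out sketch of that pattern, assuming it lives in this package (findOpenPort is hypothetical, not commit code):

// Sketch only: the bounded random retry used for each port above.
func findOpenPort(preferred int) int {
	if checkPort(preferred) {
		return preferred
	}
	for tries := 0; tries < 256; tries++ {
		p := int(4096 + (randomUInt() % 16384)) // same range the primary port search uses
		if checkPort(p) {
			return p
		}
	}
	return 0 // caller treats 0 as "disable this port"
}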

+ 3 - 4
go/pkg/zerotier/root.go

@@ -6,9 +6,8 @@ type Root struct {
 	Identity Identity `json:"identity"`
 
 	// Locator describes the endpoints where this root may be found.
-	Locator  Locator `json:"locator"`
+	Locator  Locator `json:"locator,omitempty"`
 
-	// URL is an optional URL where the latest Root may be fetched.
-	// This is one method of locator update, while in-band mechanisms are the other.
-	URL      string `json:"url"`
+	// Bootstrap is an array of IP/port locations where this root might be found if a locator is not known.
+	Bootstrap []InetAddress `json:"bootstrap,omitempty"`
 }

+ 23 - 8
include/ZeroTierCore.h

@@ -50,8 +50,11 @@ extern "C" {
 
 
 /**
 /**
  * Default UDP port for devices running a ZeroTier endpoint
  * Default UDP port for devices running a ZeroTier endpoint
+ *
+ * NOTE: as of V2 this has changed to 893 since many NATs (even symmetric)
+ * treat privileged ports in a special way. The old default was 9993.
  */
  */
-#define ZT_DEFAULT_PORT 9993
+#define ZT_DEFAULT_PORT 893
 
 
 /**
 /**
  * Minimum MTU, which is the minimum allowed by IPv6 and several specs
  * Minimum MTU, which is the minimum allowed by IPv6 and several specs
@@ -108,7 +111,10 @@ extern "C" {
 #define ZT_MAX_ZT_ASSIGNED_ADDRESSES 32
 #define ZT_MAX_ZT_ASSIGNED_ADDRESSES 32
 
 
 /**
 /**
- * Maximum number of "specialists" on a network -- bridges, anchors, etc.
+ * Maximum number of "specialists" on a network -- bridges, etc.
+ *
+ * A specialist is a node tagged with some special role like acting as
+ * a promiscuous bridge, open relay, administrator, etc.
  */
  */
 #define ZT_MAX_NETWORK_SPECIALISTS 256
 #define ZT_MAX_NETWORK_SPECIALISTS 256
 
 
@@ -134,6 +140,10 @@ extern "C" {
 
 
 /**
 /**
  * Maximum number of direct network paths to a given peer
  * Maximum number of direct network paths to a given peer
+ *
+ * Note that dual-stack configs may end up resulting in both IPv6 and IPv4
+ * paths existing. This gives enough headroom for multipath configs with
+ * dual stacks across the board.
  */
  */
 #define ZT_MAX_PEER_NETWORK_PATHS 16
 #define ZT_MAX_PEER_NETWORK_PATHS 16
 
 
@@ -144,11 +154,18 @@ extern "C" {
 
 
 /**
 /**
  * Maximum number of rules per capability object
  * Maximum number of rules per capability object
+ *
+ * Capabilities normally contain only a few rules. The rules in a capability
+ * should be short and to the point.
  */
  */
 #define ZT_MAX_CAPABILITY_RULES 64
 #define ZT_MAX_CAPABILITY_RULES 64
 
 
 /**
 /**
  * Maximum number of certificates of ownership to assign to a single network member
  * Maximum number of certificates of ownership to assign to a single network member
+ *
+ * Network members can have more than four IPs, etc., but right now there
+ * is a protocol limit on how many COOs can be assigned. If your config needs
+ * more than four authenticated IPs per node you may have personal problems.
  */
  */
 #define ZT_MAX_CERTIFICATES_OF_OWNERSHIP 4
 #define ZT_MAX_CERTIFICATES_OF_OWNERSHIP 4
 
 
@@ -159,14 +176,11 @@ extern "C" {
 
 
 /**
 /**
  * Maximum number of multicast group subscriptions on a local virtual network interface
  * Maximum number of multicast group subscriptions on a local virtual network interface
+ *
+ * This coincides with many operating systems' maximum values and is rather huge.
  */
  */
 #define ZT_MAX_MULTICAST_SUBSCRIPTIONS 1024
 #define ZT_MAX_MULTICAST_SUBSCRIPTIONS 1024
 
 
-/**
- * Maximum value for link quality (min is 0)
- */
-#define ZT_PATH_LINK_QUALITY_MAX 255
-
 /* Rule specification contants **********************************************/
 /* Rule specification contants **********************************************/
 
 
 /**
 /**
@@ -1395,8 +1409,9 @@ ZT_SDK_API enum ZT_ResultCode ZT_Node_new(ZT_Node **node,void *uptr,void *tptr,c
  * first. This can crash if processXXX() methods are in progress.
  * first. This can crash if processXXX() methods are in progress.
  *
  *
  * @param node Node to delete
  * @param node Node to delete
+ * @param tptr Thread pointer to pass to functions/callbacks resulting from this call
  */
  */
-ZT_SDK_API void ZT_Node_delete(ZT_Node *node);
+ZT_SDK_API void ZT_Node_delete(ZT_Node *node,void *tptr);
 
 
 /**
 /**
  * Process a packet received from the physical wire
  * Process a packet received from the physical wire

+ 1 - 1
node/Constants.hpp

@@ -173,7 +173,7 @@
 /**
  * Interval between direct path pushes in milliseconds if we don't have a path
  */
-#define ZT_DIRECT_PATH_PUSH_INTERVAL 15000
+#define ZT_DIRECT_PATH_PUSH_INTERVAL 30000
 
 /**
  * Interval between direct path pushes in milliseconds if we already have a path

+ 53 - 58
node/Endpoint.cpp

@@ -53,13 +53,15 @@ bool Endpoint::operator<(const Endpoint &ep) const
 int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const
 int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const
 {
 {
 	int p;
 	int p;
+	data[0] = (uint8_t)_t;
+	Utils::storeBigEndian(data + 1,(int16_t)_l[0]);
+	Utils::storeBigEndian(data + 3,(int16_t)_l[1]);
+	Utils::storeBigEndian(data + 5,(int16_t)_l[2]);
 	switch(_t) {
 	switch(_t) {
 		case INETADDR:
 		case INETADDR:
-			data[0] = (uint8_t)INETADDR;
-			return 1 + reinterpret_cast<const InetAddress *>(&_v.sa)->marshal(data+1);
+			return 7 + reinterpret_cast<const InetAddress *>(&_v.sa)->marshal(data+7);
 		case DNSNAME:
 		case DNSNAME:
-			data[0] = (uint8_t)DNSNAME;
-			p = 1;
+			p = 7;
 			for (;;) {
 			for (;;) {
 				if ((data[p] = (uint8_t)_v.dns.name[p-1]) == 0)
 				if ((data[p] = (uint8_t)_v.dns.name[p-1]) == 0)
 					break;
 					break;
@@ -71,17 +73,15 @@ int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const
 			data[p++] = (uint8_t)_v.dns.port;
 			data[p++] = (uint8_t)_v.dns.port;
 			return p;
 			return p;
 		case ZEROTIER:
 		case ZEROTIER:
-			data[0] = (uint8_t)ZEROTIER;
-			data[1] = (uint8_t)(_v.zt.a >> 32U);
-			data[2] = (uint8_t)(_v.zt.a >> 24U);
-			data[3] = (uint8_t)(_v.zt.a >> 16U);
-			data[4] = (uint8_t)(_v.zt.a >> 8U);
-			data[5] = (uint8_t)_v.zt.a;
-			memcpy(data + 6,_v.zt.idh,ZT_IDENTITY_HASH_SIZE);
-			return (ZT_IDENTITY_HASH_SIZE + 6);
+			data[7] = (uint8_t)(_v.zt.a >> 32U);
+			data[8] = (uint8_t)(_v.zt.a >> 24U);
+			data[9] = (uint8_t)(_v.zt.a >> 16U);
+			data[10] = (uint8_t)(_v.zt.a >> 8U);
+			data[11] = (uint8_t)_v.zt.a;
+			memcpy(data + 12,_v.zt.idh,ZT_IDENTITY_HASH_SIZE);
+			return ZT_IDENTITY_HASH_SIZE + 12;
 		case URL:
 		case URL:
-			data[0] = (uint8_t)URL;
-			p = 1;
+			p = 7;
 			for (;;) {
 			for (;;) {
 				if ((data[p] = (uint8_t)_v.url[p-1]) == 0)
 				if ((data[p] = (uint8_t)_v.url[p-1]) == 0)
 					break;
 					break;
@@ -91,65 +91,63 @@ int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const
 			}
 			}
 			return p;
 			return p;
 		case ETHERNET:
 		case ETHERNET:
-			data[0] = (uint8_t)ETHERNET;
-			data[1] = (uint8_t)(_v.eth >> 40U);
-			data[2] = (uint8_t)(_v.eth >> 32U);
-			data[3] = (uint8_t)(_v.eth >> 24U);
-			data[4] = (uint8_t)(_v.eth >> 16U);
-			data[5] = (uint8_t)(_v.eth >> 8U);
-			data[6] = (uint8_t)_v.eth;
-			return 7;
+			data[7] = (uint8_t)(_v.eth >> 40U);
+			data[8] = (uint8_t)(_v.eth >> 32U);
+			data[9] = (uint8_t)(_v.eth >> 24U);
+			data[10] = (uint8_t)(_v.eth >> 16U);
+			data[11] = (uint8_t)(_v.eth >> 8U);
+			data[12] = (uint8_t)_v.eth;
+			return 13;
 		default:
 		default:
 			data[0] = (uint8_t)NIL;
 			data[0] = (uint8_t)NIL;
-			return 1;
+			return 7;
 	}
 	}
 }
 }
 
 
 int Endpoint::unmarshal(const uint8_t *restrict data,const int len)
 int Endpoint::unmarshal(const uint8_t *restrict data,const int len)
 {
 {
-	if (len <= 0)
+	if (len < 7)
 		return -1;
 		return -1;
 	int p;
 	int p;
-	switch((Type)data[0]) {
+	_t = (Type)data[0];
+	_l[0] = Utils::loadBigEndian<int16_t>(data + 1);
+	_l[1] = Utils::loadBigEndian<int16_t>(data + 3);
+	_l[2] = Utils::loadBigEndian<int16_t>(data + 5);
+	switch(_t) {
 		case NIL:
 		case NIL:
-			_t = NIL;
-			return 1;
+			return 7;
 		case INETADDR:
 		case INETADDR:
-			_t = INETADDR;
-			return reinterpret_cast<InetAddress *>(&_v.sa)->unmarshal(data+1,len-1);
+			return 7 + reinterpret_cast<InetAddress *>(&_v.sa)->unmarshal(data+7,len-7);
 		case DNSNAME:
 		case DNSNAME:
-			if (len < 4)
+			if (len < 10)
 				return -1;
 				return -1;
-			_t = DNSNAME;
-			p = 1;
+			p = 7;
 			for (;;) {
 			for (;;) {
 				if ((_v.dns.name[p-1] = (char)data[p]) == 0) {
 				if ((_v.dns.name[p-1] = (char)data[p]) == 0) {
 					++p;
 					++p;
 					break;
 					break;
 				}
 				}
 				++p;
 				++p;
-				if ((p >= (ZT_ENDPOINT_MAX_NAME_SIZE+1))||(p >= (len-2)))
+				if ((p >= (ZT_ENDPOINT_MARSHAL_SIZE_MAX-2))||(p >= (len-2)))
 					return -1;
 					return -1;
 			}
 			}
 			_v.dns.port = (uint16_t)(((unsigned int)data[p++]) << 8U);
 			_v.dns.port = (uint16_t)(((unsigned int)data[p++]) << 8U);
 			_v.dns.port |= (uint16_t)data[p++];
 			_v.dns.port |= (uint16_t)data[p++];
 			return p;
 			return p;
 		case ZEROTIER:
 		case ZEROTIER:
-			if (len < (ZT_IDENTITY_HASH_SIZE + 6))
+			if (len < 60)
 				return -1;
 				return -1;
-			_t = ZEROTIER;
-			_v.zt.a = ((uint64_t)data[1]) << 32U;
-			_v.zt.a |= ((uint64_t)data[2]) << 24U;
-			_v.zt.a |= ((uint64_t)data[3]) << 16U;
-			_v.zt.a |= ((uint64_t)data[4]) << 8U;
-			_v.zt.a |= (uint64_t)data[5];
-			memcpy(_v.zt.idh,data + 6,ZT_IDENTITY_HASH_SIZE);
-			return (ZT_IDENTITY_HASH_SIZE + 6);
+			_v.zt.a = ((uint64_t)data[7]) << 32U;
+			_v.zt.a |= ((uint64_t)data[8]) << 24U;
+			_v.zt.a |= ((uint64_t)data[9]) << 16U;
+			_v.zt.a |= ((uint64_t)data[10]) << 8U;
+			_v.zt.a |= (uint64_t)data[11];
+			memcpy(_v.zt.idh,data + 12,48);
+			return 60;
 		case URL:
 		case URL:
-			if (len < 2)
+			if (len < 8)
 				return -1;
 				return -1;
-			_t = URL;
-			p = 1;
+			p = 7;
 			for (;;) {
 			for (;;) {
 				if ((_v.url[p-1] = (char)data[p]) == 0) {
 				if ((_v.url[p-1] = (char)data[p]) == 0) {
 					++p;
 					++p;
@@ -161,25 +159,22 @@ int Endpoint::unmarshal(const uint8_t *restrict data,const int len)
 			}
 			}
 			return p;
 			return p;
 		case ETHERNET:
 		case ETHERNET:
-			if (len < 7)
+			if (len < 13)
 				return -1;
 				return -1;
-			_t = ZEROTIER;
-			_v.eth = ((uint64_t)data[1]) << 40U;
-			_v.eth |= ((uint64_t)data[2]) << 32U;
-			_v.eth |= ((uint64_t)data[3]) << 24U;
-			_v.eth |= ((uint64_t)data[4]) << 16U;
-			_v.eth |= ((uint64_t)data[5]) << 8U;
-			_v.eth |= (uint64_t)data[6];
-			return 7;
+			_v.eth = ((uint64_t)data[7]) << 40U;
+			_v.eth |= ((uint64_t)data[8]) << 32U;
+			_v.eth |= ((uint64_t)data[9]) << 24U;
+			_v.eth |= ((uint64_t)data[10]) << 16U;
+			_v.eth |= ((uint64_t)data[11]) << 8U;
+			_v.eth |= (uint64_t)data[12];
+			return 13;
 		default:
 		default:
 			// Unrecognized endpoint types not yet specified must start with a byte
 			// Unrecognized endpoint types not yet specified must start with a byte
 			// length size so that older versions of ZeroTier can skip them.
 			// length size so that older versions of ZeroTier can skip them.
-			if (len < 2)
+			if (len < 8)
 				return -1;
 				return -1;
-			_t = UNRECOGNIZED;
-			return 1 + (int)data[1];
+			return 8 + (int)data[7];
 	}
 	}
-	return false;
 }
 }
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier

+ 8 - 3
node/Endpoint.hpp

@@ -24,12 +24,16 @@
 #include "Address.hpp"
 #include "Address.hpp"
 #include "Utils.hpp"
 #include "Utils.hpp"
 
 
-#define ZT_ENDPOINT_MARSHAL_SIZE_MAX (ZT_ENDPOINT_MAX_NAME_SIZE+3)
+// max name size + type byte + port (for DNS name/port) + 3x 16-bit coordinate for location
+#define ZT_ENDPOINT_MARSHAL_SIZE_MAX (ZT_ENDPOINT_MAX_NAME_SIZE+1+2+2+2+2)
 
 
 namespace ZeroTier {
 namespace ZeroTier {
 
 
 /**
 /**
  * Endpoint variant specifying some form of network endpoint
  * Endpoint variant specifying some form of network endpoint
+ *
+ * This data structure supports a number of types that are not yet actually used:
+ * DNSNAME, URL, and ETHERNET. These are present to reserve them for future use.
  */
  */
 class Endpoint
 class Endpoint
 {
 {
@@ -52,10 +56,10 @@ public:
 	ZT_ALWAYS_INLINE Endpoint(const char *name,const int port) : _t(DNSNAME) { Utils::scopy(_v.dns.name,sizeof(_v.dns.name),name); _v.dns.port = port; }
 	ZT_ALWAYS_INLINE Endpoint(const char *name,const int port) : _t(DNSNAME) { Utils::scopy(_v.dns.name,sizeof(_v.dns.name),name); _v.dns.port = port; }
 	explicit ZT_ALWAYS_INLINE Endpoint(const char *url) : _t(URL) { Utils::scopy(_v.url,sizeof(_v.url),url); }
 	explicit ZT_ALWAYS_INLINE Endpoint(const char *url) : _t(URL) { Utils::scopy(_v.url,sizeof(_v.url),url); }
 
 
-	ZT_ALWAYS_INLINE const InetAddress *sockaddr() const { return (_t == INETADDR) ? reinterpret_cast<const InetAddress *>(&_v.sa) : nullptr; }
+	ZT_ALWAYS_INLINE const InetAddress *inetAddr() const { return (_t == INETADDR) ? reinterpret_cast<const InetAddress *>(&_v.sa) : nullptr; }
 	ZT_ALWAYS_INLINE const char *dnsName() const { return (_t == DNSNAME) ? _v.dns.name : nullptr; }
 	ZT_ALWAYS_INLINE const char *dnsName() const { return (_t == DNSNAME) ? _v.dns.name : nullptr; }
 	ZT_ALWAYS_INLINE int dnsPort() const { return (_t == DNSNAME) ? _v.dns.port : -1; }
 	ZT_ALWAYS_INLINE int dnsPort() const { return (_t == DNSNAME) ? _v.dns.port : -1; }
-	ZT_ALWAYS_INLINE Address ztAddress() const { return (_t == ZEROTIER) ? Address(_v.zt.a) : Address(); }
+	ZT_ALWAYS_INLINE Address ztAddress() const { return Address((_t == ZEROTIER) ? _v.zt.a : (uint64_t)0); }
 	ZT_ALWAYS_INLINE const uint8_t *ztIdentityHash() const { return (_t == ZEROTIER) ? _v.zt.idh : nullptr; }
 	ZT_ALWAYS_INLINE const uint8_t *ztIdentityHash() const { return (_t == ZEROTIER) ? _v.zt.idh : nullptr; }
 	ZT_ALWAYS_INLINE const char *url() const { return (_t == URL) ? _v.url : nullptr; }
 	ZT_ALWAYS_INLINE const char *url() const { return (_t == URL) ? _v.url : nullptr; }
 	ZT_ALWAYS_INLINE MAC ethernet() const { return (_t == ETHERNET) ? MAC(_v.eth) : MAC(); }
 	ZT_ALWAYS_INLINE MAC ethernet() const { return (_t == ETHERNET) ? MAC(_v.eth) : MAC(); }
@@ -75,6 +79,7 @@ public:
 
 
 private:
 private:
 	Type _t;
 	Type _t;
+	int _l[3]; // X,Y,Z location in kilometers from the nearest gravitational center of mass
 	union {
 	union {
 		struct sockaddr_storage sa;
 		struct sockaddr_storage sa;
 		struct {
 		struct {

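Because every accessor above is guarded by a type check, callers can probe an Endpoint without first switching on its type: a mismatched query returns a null pointer or an empty Address/MAC rather than reinterpreting the union. An illustrative fragment, not from this commit, with a made-up host name and assuming the usual C headers:

    Endpoint ep("relay.example.com", 9993); // constructs a DNSNAME endpoint
    if (ep.dnsName())
        printf("%s:%d\n", ep.dnsName(), ep.dnsPort());
    assert(ep.inetAddr() == nullptr); // not an INETADDR endpoint
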
+ 28 - 40
node/Hashtable.hpp

@@ -16,8 +16,8 @@
 
 
 #include "Constants.hpp"
 #include "Constants.hpp"
 
 
-#include <stdlib.h>
-#include <stdio.h>
+#include <cstdlib>
+#include <stdexcept>
 #include <vector>
 #include <vector>
 
 
 namespace ZeroTier {
 namespace ZeroTier {
@@ -94,44 +94,29 @@ public:
 	/**
 	/**
 	 * @param bc Initial capacity in buckets (default: 32, must be nonzero)
 	 * @param bc Initial capacity in buckets (default: 32, must be nonzero)
 	 */
 	 */
-	inline Hashtable(unsigned long bc = 32) :
+	ZT_ALWAYS_INLINE Hashtable(unsigned long bc = 32) :
 		_t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * bc))),
 		_t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * bc))),
 		_bc(bc),
 		_bc(bc),
 		_s(0)
 		_s(0)
 	{
 	{
 		if (!_t)
 		if (!_t)
-			throw ZT_EXCEPTION_OUT_OF_MEMORY;
-		for(unsigned long i=0;i<bc;++i)
-			_t[i] = (_Bucket *)0;
+			throw std::bad_alloc();
+		memset(_t,0,sizeof(_Bucket *) * bc);
 	}
 	}
 
 
-	inline Hashtable(const Hashtable<K,V> &ht) :
-		_t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * ht._bc))),
-		_bc(ht._bc),
-		_s(ht._s)
+	ZT_ALWAYS_INLINE Hashtable(const Hashtable<K,V> &ht) :
+		Hashtable()
 	{
 	{
-		if (!_t)
-			throw ZT_EXCEPTION_OUT_OF_MEMORY;
-		for(unsigned long i=0;i<_bc;++i)
-			_t[i] = (_Bucket *)0;
-		for(unsigned long i=0;i<_bc;++i) {
-			const _Bucket *b = ht._t[i];
-			while (b) {
-				_Bucket *nb = new _Bucket(*b);
-				nb->next = _t[i];
-				_t[i] = nb;
-				b = b->next;
-			}
-		}
+		*this = ht;
 	}
 	}
 
 
-	inline ~Hashtable()
+	ZT_ALWAYS_INLINE ~Hashtable()
 	{
 	{
 		this->clear();
 		this->clear();
 		::free(_t);
 		::free(_t);
 	}
 	}
 
 
-	inline Hashtable &operator=(const Hashtable<K,V> &ht)
+	ZT_ALWAYS_INLINE Hashtable &operator=(const Hashtable<K,V> &ht)
 	{
 	{
 		this->clear();
 		this->clear();
 		if (ht._s) {
 		if (ht._s) {
@@ -149,7 +134,7 @@ public:
 	/**
 	/**
 	 * Erase all entries
 	 * Erase all entries
 	 */
 	 */
-	inline void clear()
+	ZT_ALWAYS_INLINE void clear()
 	{
 	{
 		if (_s) {
 		if (_s) {
 			for(unsigned long i=0;i<_bc;++i) {
 			for(unsigned long i=0;i<_bc;++i) {
@@ -168,7 +153,7 @@ public:
 	/**
 	/**
 	 * @return Vector of all keys
 	 * @return Vector of all keys
 	 */
 	 */
-	inline typename std::vector<K> keys() const
+	ZT_ALWAYS_INLINE typename std::vector<K> keys() const
 	{
 	{
 		typename std::vector<K> k;
 		typename std::vector<K> k;
 		if (_s) {
 		if (_s) {
@@ -191,7 +176,7 @@ public:
 	 * @tparam Type of V (generally inferred)
 	 * @tparam Type of V (generally inferred)
 	 */
 	 */
 	template<typename C>
 	template<typename C>
-	inline void appendKeys(C &v) const
+	ZT_ALWAYS_INLINE void appendKeys(C &v) const
 	{
 	{
 		if (_s) {
 		if (_s) {
 			for(unsigned long i=0;i<_bc;++i) {
 			for(unsigned long i=0;i<_bc;++i) {
@@ -207,7 +192,7 @@ public:
 	/**
 	/**
 	 * @return Vector of all entries (pairs of K,V)
 	 * @return Vector of all entries (pairs of K,V)
 	 */
 	 */
-	inline typename std::vector< std::pair<K,V> > entries() const
+	ZT_ALWAYS_INLINE typename std::vector< std::pair<K,V> > entries() const
 	{
 	{
 		typename std::vector< std::pair<K,V> > k;
 		typename std::vector< std::pair<K,V> > k;
 		if (_s) {
 		if (_s) {
@@ -227,7 +212,7 @@ public:
 	 * @param k Key
 	 * @param k Key
 	 * @return Pointer to value or NULL if not found
 	 * @return Pointer to value or NULL if not found
 	 */
 	 */
-	inline V *get(const K k)
+	ZT_ALWAYS_INLINE V *get(const K k)
 	{
 	{
 		_Bucket *b = _t[_hc(k) % _bc];
 		_Bucket *b = _t[_hc(k) % _bc];
 		while (b) {
 		while (b) {
@@ -237,14 +222,14 @@ public:
 		}
 		}
 		return (V *)0;
 		return (V *)0;
 	}
 	}
-	inline const V *get(const K k) const { return const_cast<Hashtable *>(this)->get(k); }
+	ZT_ALWAYS_INLINE const V *get(const K k) const { return const_cast<Hashtable *>(this)->get(k); }
 
 
 	/**
 	/**
 	 * @param k Key
 	 * @param k Key
 	 * @param v Value to fill with result
 	 * @param v Value to fill with result
 	 * @return True if value was found and set (if false, v is not modified)
 	 * @return True if value was found and set (if false, v is not modified)
 	 */
 	 */
-	inline bool get(const K &k,V &v) const
+	ZT_ALWAYS_INLINE bool get(const K &k,V &v) const
 	{
 	{
 		_Bucket *b = _t[_hc(k) % _bc];
 		_Bucket *b = _t[_hc(k) % _bc];
 		while (b) {
 		while (b) {
@@ -261,7 +246,7 @@ public:
 	 * @param k Key to check
 	 * @param k Key to check
 	 * @return True if key is present
 	 * @return True if key is present
 	 */
 	 */
-	inline bool contains(const K &k) const
+	ZT_ALWAYS_INLINE bool contains(const K &k) const
 	{
 	{
 		_Bucket *b = _t[_hc(k) % _bc];
 		_Bucket *b = _t[_hc(k) % _bc];
 		while (b) {
 		while (b) {
@@ -276,7 +261,7 @@ public:
 	 * @param k Key
 	 * @param k Key
 	 * @return True if value was present
 	 * @return True if value was present
 	 */
 	 */
-	inline bool erase(const K &k)
+	ZT_ALWAYS_INLINE bool erase(const K &k)
 	{
 	{
 		const unsigned long bidx = _hc(k) % _bc;
 		const unsigned long bidx = _hc(k) % _bc;
 		_Bucket *lastb = (_Bucket *)0;
 		_Bucket *lastb = (_Bucket *)0;
@@ -301,7 +286,7 @@ public:
 	 * @param v Value
 	 * @param v Value
 	 * @return Reference to value in table
 	 * @return Reference to value in table
 	 */
 	 */
-	inline V &set(const K &k,const V &v)
+	ZT_ALWAYS_INLINE V &set(const K &k,const V &v)
 	{
 	{
 		const unsigned long h = _hc(k);
 		const unsigned long h = _hc(k);
 		unsigned long bidx = h % _bc;
 		unsigned long bidx = h % _bc;
@@ -331,7 +316,7 @@ public:
 	 * @param k Key
 	 * @param k Key
 	 * @return Value, possibly newly created
 	 * @return Value, possibly newly created
 	 */
 	 */
-	inline V &operator[](const K k)
+	ZT_ALWAYS_INLINE V &operator[](const K k)
 	{
 	{
 		const unsigned long h = _hc(k);
 		const unsigned long h = _hc(k);
 		unsigned long bidx = h % _bc;
 		unsigned long bidx = h % _bc;
@@ -368,15 +353,18 @@ public:
 private:
 private:
 	template<typename O>
 	template<typename O>
 	static ZT_ALWAYS_INLINE unsigned long _hc(const O &obj) { return (unsigned long)obj.hashCode(); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(const O &obj) { return (unsigned long)obj.hashCode(); }
-
-	static ZT_ALWAYS_INLINE unsigned long _hc(const uint64_t i) { return (unsigned long)(i ^ (i >> 32)); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const uint64_t i) { return (unsigned long)(i ^ (i >> 32U)); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(const uint32_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(const uint32_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(const uint16_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(const uint16_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
-	static ZT_ALWAYS_INLINE unsigned long _hc(const int i) { return ((unsigned long)i * (unsigned long)0x9e3379b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const uint8_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const int64_t i) { return (unsigned long)(i ^ (i >> 32U)); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const int32_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const int16_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
+	static ZT_ALWAYS_INLINE unsigned long _hc(const int8_t i) { return ((unsigned long)i * (unsigned long)0x9e3779b1); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(void *p) { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(void *p) { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(const void *p) { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
 	static ZT_ALWAYS_INLINE unsigned long _hc(const void *p) { return ((unsigned long)((uintptr_t)p) * (unsigned long)0x9e3779b1); }
 
 
-	inline void _grow()
+	ZT_ALWAYS_INLINE void _grow()
 	{
 	{
 		const unsigned long nc = _bc * 2;
 		const unsigned long nc = _bc * 2;
 		_Bucket **nt = reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * nc));
 		_Bucket **nt = reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * nc));

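All of the _hc() overloads above reduce to the same idea: 32-bit and smaller integer keys are spread with the golden-ratio constant 0x9e3779b1 (Fibonacci hashing), 64-bit keys are first folded so both halves contribute, and get()/set() take the result modulo the bucket count. A sketch of the equivalent bucket selection, using only what appears above:

    static unsigned long bucketFor(const uint32_t k, const unsigned long bucketCount)
    {
        // multiplicative (Fibonacci) hashing with the 32-bit golden-ratio constant
        return ((unsigned long)k * (unsigned long)0x9e3779b1) % bucketCount;
    }
    static unsigned long bucketFor(const uint64_t k, const unsigned long bucketCount)
    {
        // fold the high half into the low half before reducing to a bucket index
        return ((unsigned long)(k ^ (k >> 32U))) % bucketCount;
    }
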
+ 13 - 1
node/InetAddress.hpp

@@ -32,6 +32,8 @@ namespace ZeroTier {
  */
  */
 #define ZT_INETADDRESS_MAX_SCOPE 7
 #define ZT_INETADDRESS_MAX_SCOPE 7
 
 
+#define ZT_INETADDRESS_MARSHAL_SIZE_MAX 19
+
 /**
 /**
  * Extends sockaddr_storage with friendly C++ methods
  * Extends sockaddr_storage with friendly C++ methods
  *
  *
@@ -463,7 +465,7 @@ public:
 	 */
 	 */
 	explicit ZT_ALWAYS_INLINE operator bool() const { return (ss_family != 0); }
 	explicit ZT_ALWAYS_INLINE operator bool() const { return (ss_family != 0); }
 
 
-	static ZT_ALWAYS_INLINE int marshalSizeMax() { return 19; }
+	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_INETADDRESS_MARSHAL_SIZE_MAX; }
 	int marshal(uint8_t data[19]) const;
 	int marshal(uint8_t data[19]) const;
 	int unmarshal(const uint8_t *restrict data,const int len);
 	int unmarshal(const uint8_t *restrict data,const int len);
 
 
@@ -588,9 +590,19 @@ public:
 static ZT_ALWAYS_INLINE InetAddress *asInetAddress(sockaddr_in *p) { return reinterpret_cast<InetAddress *>(p); }
 static ZT_ALWAYS_INLINE InetAddress *asInetAddress(sockaddr_in *p) { return reinterpret_cast<InetAddress *>(p); }
 static ZT_ALWAYS_INLINE InetAddress *asInetAddress(sockaddr_in6 *p) { return reinterpret_cast<InetAddress *>(p); }
 static ZT_ALWAYS_INLINE InetAddress *asInetAddress(sockaddr_in6 *p) { return reinterpret_cast<InetAddress *>(p); }
 static ZT_ALWAYS_INLINE InetAddress *asInetAddress(sockaddr *p) { return reinterpret_cast<InetAddress *>(p); }
 static ZT_ALWAYS_INLINE InetAddress *asInetAddress(sockaddr *p) { return reinterpret_cast<InetAddress *>(p); }
+static ZT_ALWAYS_INLINE InetAddress *asInetAddress(sockaddr_storage *p) { return reinterpret_cast<InetAddress *>(p); }
 static ZT_ALWAYS_INLINE const InetAddress *asInetAddress(const sockaddr_in *p) { return reinterpret_cast<const InetAddress *>(p); }
 static ZT_ALWAYS_INLINE const InetAddress *asInetAddress(const sockaddr_in *p) { return reinterpret_cast<const InetAddress *>(p); }
 static ZT_ALWAYS_INLINE const InetAddress *asInetAddress(const sockaddr_in6 *p) { return reinterpret_cast<const InetAddress *>(p); }
 static ZT_ALWAYS_INLINE const InetAddress *asInetAddress(const sockaddr_in6 *p) { return reinterpret_cast<const InetAddress *>(p); }
 static ZT_ALWAYS_INLINE const InetAddress *asInetAddress(const sockaddr *p) { return reinterpret_cast<const InetAddress *>(p); }
 static ZT_ALWAYS_INLINE const InetAddress *asInetAddress(const sockaddr *p) { return reinterpret_cast<const InetAddress *>(p); }
+static ZT_ALWAYS_INLINE const InetAddress *asInetAddress(const sockaddr_storage *p) { return reinterpret_cast<const InetAddress *>(p); }
+static ZT_ALWAYS_INLINE InetAddress &asInetAddress(sockaddr_in &p) { return *reinterpret_cast<InetAddress *>(&p); }
+static ZT_ALWAYS_INLINE InetAddress &asInetAddress(sockaddr_in6 &p) { return *reinterpret_cast<InetAddress *>(&p); }
+static ZT_ALWAYS_INLINE InetAddress &asInetAddress(sockaddr &p) { return *reinterpret_cast<InetAddress *>(&p); }
+static ZT_ALWAYS_INLINE InetAddress &asInetAddress(sockaddr_storage &p) { return *reinterpret_cast<InetAddress *>(&p); }
+static ZT_ALWAYS_INLINE const InetAddress &asInetAddress(const sockaddr_in &p) { return *reinterpret_cast<const InetAddress *>(&p); }
+static ZT_ALWAYS_INLINE const InetAddress &asInetAddress(const sockaddr_in6 &p) { return *reinterpret_cast<const InetAddress *>(&p); }
+static ZT_ALWAYS_INLINE const InetAddress &asInetAddress(const sockaddr &p) { return *reinterpret_cast<const InetAddress *>(&p); }
+static ZT_ALWAYS_INLINE const InetAddress &asInetAddress(const sockaddr_storage &p) { return *reinterpret_cast<const InetAddress *>(&p); }
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier
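
The new sockaddr_storage and reference overloads make it possible to treat addresses handed back by OS socket APIs as InetAddress in place; asInetAddress() is only a reinterpret_cast, so no copy is made. A hedged example, with the function name invented for illustration:

    static bool isIPv4Peer(const sockaddr_storage &ss)
    {
        // Views the OS structure directly as an InetAddress; no conversion occurs.
        return asInetAddress(ss).isV4();
    }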
 
 

+ 44 - 38
node/Locator.cpp

@@ -42,23 +42,25 @@ int Locator::marshal(uint8_t data[ZT_LOCATOR_MARSHAL_SIZE_MAX],const bool exclud
 	if ((_endpointCount > ZT_LOCATOR_MAX_ENDPOINTS)||(_signatureLength > ZT_SIGNATURE_BUFFER_SIZE))
 	if ((_endpointCount > ZT_LOCATOR_MAX_ENDPOINTS)||(_signatureLength > ZT_SIGNATURE_BUFFER_SIZE))
 		return -1;
 		return -1;
 
 
-	Utils::putUInt64(data,(uint64_t)_ts);
+	Utils::storeBigEndian<int64_t>(data,_ts);
 	int p = 8;
 	int p = 8;
 
 
-	data[p++] = (uint8_t)(_endpointCount >> 8U);
-	data[p++] = (uint8_t)_endpointCount;
-	for(unsigned int i=0;i<_endpointCount;++i) {
-		int tmp = _at[i].marshal(data + p);
-		if (tmp < 0)
-			return -1;
-		p += tmp;
-	}
+	if (_ts > 0) {
+		data[p++] = (uint8_t)(_endpointCount >> 8U);
+		data[p++] = (uint8_t)_endpointCount;
+		for (unsigned int i = 0; i < _endpointCount; ++i) {
+			int tmp = _at[i].marshal(data + p);
+			if (tmp < 0)
+				return -1;
+			p += tmp;
+		}
 
 
-	if (!excludeSignature) {
-		data[p++] = (uint8_t)(_signatureLength >> 8U);
-		data[p++] = (uint8_t)_signatureLength;
-		memcpy(data + p,_signature,_signatureLength);
-		p += (int)_signatureLength;
+		if (!excludeSignature) {
+			data[p++] = (uint8_t)(_signatureLength >> 8U);
+			data[p++] = (uint8_t)_signatureLength;
+			memcpy(data + p,_signature,_signatureLength);
+			p += (int)_signatureLength;
+		}
 	}
 	}
 
 
 	return p;
 	return p;
@@ -69,34 +71,38 @@ int Locator::unmarshal(const uint8_t *restrict data,const int len)
 	if (len <= (8 + 2 + 48))
 	if (len <= (8 + 2 + 48))
 		return -1;
 		return -1;
 
 
-	_ts = (int64_t)Utils::readUInt64(data);
+	_ts = Utils::loadBigEndian<int64_t>(data);
 	int p = 8;
 	int p = 8;
 
 
-	unsigned int ec = (int)data[p++];
-	ec <<= 8U;
-	ec |= data[p++];
-	if (ec > ZT_LOCATOR_MAX_ENDPOINTS)
-		return -1;
-	_endpointCount = ec;
-	for(int i=0;i<ec;++i) {
-		int tmp = _at[i].unmarshal(data + p,len - p);
-		if (tmp < 0)
+	if (_ts > 0) {
+		unsigned int ec = (int)data[p++];
+		ec <<= 8U;
+		ec |= data[p++];
+		if (ec > ZT_LOCATOR_MAX_ENDPOINTS)
 			return -1;
 			return -1;
-		p += tmp;
-	}
+		_endpointCount = ec;
+		for (int i = 0; i < ec; ++i) {
+			int tmp = _at[i].unmarshal(data + p,len - p);
+			if (tmp < 0)
+				return -1;
+			p += tmp;
+		}
 
 
-	if ((p + 2) > len)
-		return -1;
-	unsigned int sl = data[p++];
-	sl <<= 8U;
-	sl |= data[p++];
-	if (sl > ZT_SIGNATURE_BUFFER_SIZE)
-		return -1;
-	_signatureLength = sl;
-	if ((p + sl) > len)
-		return -1;
-	memcpy(_signature,data + p,sl);
-	p += (int)sl;
+		if ((p + 2) > len)
+			return -1;
+		unsigned int sl = data[p++];
+		sl <<= 8U;
+		sl |= data[p++];
+		if (sl > ZT_SIGNATURE_BUFFER_SIZE)
+			return -1;
+		_signatureLength = sl;
+		if ((p + sl) > len)
+			return -1;
+		memcpy(_signature,data + p,sl);
+		p += (int)sl;
+	} else {
+		_ts = 0;
+	}
 
 
 	return p;
 	return p;
 }
 }
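
With the _ts > 0 guard now applied in both directions, a locator whose timestamp is zero or negative is treated as empty and serializes to nothing but its 8-byte timestamp. For a populated locator, the layout implied by marshal()/unmarshal() above is roughly as follows (all integers big-endian; offsets after the endpoint list depend on the endpoints' marshaled sizes):

    [0..7]   timestamp (int64)
    [8..9]   endpoint count (16-bit)
    [10..]   marshaled endpoints, back to back
    [.. +2]  signature length (16-bit; omitted when excludeSignature is true)
    [..]     signature bytes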

+ 129 - 82
node/Node.cpp

@@ -42,16 +42,20 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in
 	RR(&_RR),
 	RR(&_RR),
 	_cb(*callbacks),
 	_cb(*callbacks),
 	_uPtr(uPtr),
 	_uPtr(uPtr),
-	_networks(8),
+	_networks(),
+	_networksMask(255),
 	_now(now),
 	_now(now),
 	_lastPing(0),
 	_lastPing(0),
 	_lastHousekeepingRun(0),
 	_lastHousekeepingRun(0),
 	_lastNetworkHousekeepingRun(0),
 	_lastNetworkHousekeepingRun(0),
+	_lastPathKeepaliveCheck(0),
 	_online(false)
 	_online(false)
 {
 {
-	memset(_expectingRepliesToBucketPtr,0,sizeof(_expectingRepliesToBucketPtr));
-	memset(_expectingRepliesTo,0,sizeof(_expectingRepliesTo));
-	memset(_lastIdentityVerification,0,sizeof(_lastIdentityVerification));
+	_networks.resize(256); // _networksMask + 1, must be power of two
+
+	memset((void *)_expectingRepliesToBucketPtr,0,sizeof(_expectingRepliesToBucketPtr));
+	memset((void *)_expectingRepliesTo,0,sizeof(_expectingRepliesTo));
+	memset((void *)_lastIdentityVerification,0,sizeof(_lastIdentityVerification));
 
 
 	uint64_t idtmp[2];
 	uint64_t idtmp[2];
 	idtmp[0] = 0; idtmp[1] = 0;
 	idtmp[0] = 0; idtmp[1] = 0;
@@ -118,7 +122,7 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in
 Node::~Node()
 Node::~Node()
 {
 {
 	{
 	{
-		Mutex::Lock _l(_networks_m);
+		RWMutex::Lock _l(_networks_m);
 		_networks.clear(); // destroy all networks before shutdown
 		_networks.clear(); // destroy all networks before shutdown
 	}
 	}
 	if (RR->sa) RR->sa->~SelfAwareness();
 	if (RR->sa) RR->sa->~SelfAwareness();
@@ -128,6 +132,11 @@ Node::~Node()
 	free(RR->rtmem);
 	free(RR->rtmem);
 }
 }
 
 
+void Node::shutdown(void *tPtr)
+{
+	RR->topology->saveAll(tPtr);
+}
+
 ZT_ResultCode Node::processWirePacket(
 ZT_ResultCode Node::processWirePacket(
 	void *tptr,
 	void *tptr,
 	int64_t now,
 	int64_t now,
@@ -164,38 +173,33 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
 	}
 	}
 }
 }
 
 
-// This function object is run past every peer every ZT_PEER_PING_PERIOD.
 struct _processBackgroundTasks_ping_eachPeer
 struct _processBackgroundTasks_ping_eachPeer
 {
 {
 	int64_t now;
 	int64_t now;
 	Node *parent;
 	Node *parent;
 	void *tPtr;
 	void *tPtr;
 	bool online;
 	bool online;
-	ZT_ALWAYS_INLINE bool operator()(const SharedPtr<Peer> &peer,const bool isRoot)
+	ZT_ALWAYS_INLINE void operator()(const SharedPtr<Peer> &peer,const bool isRoot)
 	{
 	{
-		unsigned int v4SendCount = 0,v6SendCount = 0;
-		peer->ping(tPtr,now,v4SendCount,v6SendCount,isRoot);
-
-		if (isRoot) {
-			if ((now - peer->lastReceive()) <= (ZT_PEER_PING_PERIOD + 5000))
-				online = true;
-
-			if (v4SendCount == 0) {
-				InetAddress try4;
-				parent->externalPathLookup(tPtr,peer->identity(),AF_INET,try4);
-				if (try4.ss_family == AF_INET)
-					peer->sendHELLO(tPtr,-1,try4,now);
-			}
+		peer->ping(tPtr,now,isRoot);
+		if ((isRoot)&&((now - peer->lastReceive()) <= (ZT_PEER_PING_PERIOD + 5000)))
+			online = true;
+	}
+};
 
 
-			if (v6SendCount == 0) {
-				InetAddress try6;
-				parent->externalPathLookup(tPtr,peer->identity(),AF_INET6,try6);
-				if (try6.ss_family == AF_INET6)
-					peer->sendHELLO(tPtr,-1,try6,now);
-			}
+static uint16_t junk = 0;
+struct _processBackgroundTasks_path_keepalive
+{
+	int64_t now;
+	RuntimeEnvironment *RR;
+	void *tPtr;
+	ZT_ALWAYS_INLINE void operator()(const SharedPtr<Path> &path)
+	{
+		if ((now - path->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
+			++junk;
+			path->send(RR,tPtr,&junk,sizeof(junk),now);
+			path->sent(now);
 		}
 		}
-
-		return true;
 	}
 	}
 };
 };
 
 
@@ -204,14 +208,6 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
 	_now = now;
 	_now = now;
 	Mutex::Lock bl(_backgroundTasksLock);
 	Mutex::Lock bl(_backgroundTasksLock);
 
 
-	// Initialize these on first call so these things happen just a few seconds after
-	// startup, since right at startup things are likely to not be ready to communicate
-	// at all yet.
-	if (_lastNetworkHousekeepingRun <= 0)
-		_lastNetworkHousekeepingRun = now - (ZT_NETWORK_HOUSEKEEPING_PERIOD / 3);
-	if (_lastHousekeepingRun <= 0)
-		_lastHousekeepingRun = now;
-
 	if ((now - _lastPing) >= ZT_PEER_PING_PERIOD) {
 	if ((now - _lastPing) >= ZT_PEER_PING_PERIOD) {
 		_lastPing = now;
 		_lastPing = now;
 		try {
 		try {
@@ -236,12 +232,10 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
 	if ((now - _lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
 	if ((now - _lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
 		_lastNetworkHousekeepingRun = now;
 		_lastNetworkHousekeepingRun = now;
 		{
 		{
-			Mutex::Lock l(_networks_m);
-			Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(_networks);
-			uint64_t *nwid = (uint64_t *)0;
-			SharedPtr<Network> *network = (SharedPtr<Network> *)0;
-			while (i.next(nwid,network)) {
-				(*network)->doPeriodicTasks(tPtr, now);
+			RWMutex::RLock l(_networks_m);
+			for(std::vector< SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i) {
+				if ((*i))
+					(*i)->doPeriodicTasks(tPtr,now);
 			}
 			}
 		}
 		}
 	}
 	}
@@ -265,13 +259,22 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
 				_localControllerAuthorizations_m.unlock();
 				_localControllerAuthorizations_m.unlock();
 			}
 			}
 
 
-			RR->topology->doPeriodicTasks(now);
+			RR->topology->doPeriodicTasks(tPtr, now);
 			RR->sa->clean(now);
 			RR->sa->clean(now);
 		} catch ( ... ) {
 		} catch ( ... ) {
 			return ZT_RESULT_FATAL_ERROR_INTERNAL;
 			return ZT_RESULT_FATAL_ERROR_INTERNAL;
 		}
 		}
 	}
 	}
 
 
+	if ((now - _lastPathKeepaliveCheck) >= ZT_PATH_KEEPALIVE_PERIOD) {
+		_lastPathKeepaliveCheck = now;
+		_processBackgroundTasks_path_keepalive pf;
+		pf.now = now;
+		pf.RR = RR;
+		pf.tPtr = tPtr;
+		RR->topology->eachPath<_processBackgroundTasks_path_keepalive &>(pf);
+	}
+
 	try {
 	try {
 		*nextBackgroundTaskDeadline = now + (int64_t)std::max(std::min((unsigned long)ZT_MAX_TIMER_TASK_INTERVAL,RR->sw->doTimerTasks(tPtr, now)), (unsigned long)ZT_MIN_TIMER_TASK_INTERVAL);
 		*nextBackgroundTaskDeadline = now + (int64_t)std::max(std::min((unsigned long)ZT_MAX_TIMER_TASK_INTERVAL,RR->sw->doTimerTasks(tPtr, now)), (unsigned long)ZT_MIN_TIMER_TASK_INTERVAL);
 	} catch ( ... ) {
 	} catch ( ... ) {
@@ -283,35 +286,68 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
 
 
 ZT_ResultCode Node::join(uint64_t nwid,void *uptr,void *tptr)
 ZT_ResultCode Node::join(uint64_t nwid,void *uptr,void *tptr)
 {
 {
-	Mutex::Lock _l(_networks_m);
-	SharedPtr<Network> &nw = _networks[nwid];
-	if (!nw)
-		nw = SharedPtr<Network>(new Network(RR,tptr,nwid,uptr,(const NetworkConfig *)0));
+	RWMutex::Lock l(_networks_m);
+
+	const uint64_t nwidHashed = nwid + (nwid >> 32U);
+	SharedPtr<Network> *nw = &(_networks[(unsigned long)(nwidHashed & _networksMask)]);
+
+	// Enlarge flat hash table of networks until all networks fit without collisions.
+	if (*nw) {
+		unsigned long newNetworksSize = (unsigned long)_networks.size();
+		std::vector< SharedPtr<Network> > newNetworks;
+		uint64_t newNetworksMask;
+		std::vector< SharedPtr<Network> >::const_iterator i;
+
+try_larger_network_hashtable:
+		newNetworksSize <<= 1U; // must remain a power of two
+		newNetworks.clear();
+		newNetworks.resize(newNetworksSize);
+		newNetworksMask = (uint64_t)(newNetworksSize - 1);
+
+		for(i=_networks.begin();i!=_networks.end();++i) {
+			if (!(*i))
+				continue; // skip empty slots in the old table
+			const uint64_t id = (*i)->id();
+			nw = &(newNetworks[(unsigned long)((id + (id >> 32U)) & newNetworksMask)]);
+			if (*nw)
+				goto try_larger_network_hashtable;
+			*nw = *i;
+		}
+		if (newNetworks[(unsigned long)(nwidHashed & newNetworksMask)])
+			goto try_larger_network_hashtable;
+
+		_networks.swap(newNetworks);
+		_networksMask = newNetworksMask;
+		nw = &(_networks[(unsigned long)(nwidHashed & newNetworksMask)]);
+	}
+
+	nw->set(new Network(RR,tptr,nwid,uptr,(const NetworkConfig *)0));
+
 	return ZT_RESULT_OK;
 	return ZT_RESULT_OK;
 }
 }
 
 
 ZT_ResultCode Node::leave(uint64_t nwid,void **uptr,void *tptr)
 ZT_ResultCode Node::leave(uint64_t nwid,void **uptr,void *tptr)
 {
 {
+	const uint64_t nwidHashed = nwid + (nwid >> 32U);
+
 	ZT_VirtualNetworkConfig ctmp;
 	ZT_VirtualNetworkConfig ctmp;
 	void **nUserPtr = (void **)0;
 	void **nUserPtr = (void **)0;
 	{
 	{
-		Mutex::Lock _l(_networks_m);
-		SharedPtr<Network> *nw = _networks.get(nwid);
+		RWMutex::RLock l(_networks_m);
+		SharedPtr<Network> &nw = _networks[(unsigned long)(nwidHashed & _networksMask)];
 		if (!nw)
 		if (!nw)
 			return ZT_RESULT_OK;
 			return ZT_RESULT_OK;
 		if (uptr)
 		if (uptr)
-			*uptr = (*nw)->userPtr();
-		(*nw)->externalConfig(&ctmp);
-		(*nw)->destroy();
-		nUserPtr = (*nw)->userPtr();
+			*uptr = nw->userPtr();
+		nw->externalConfig(&ctmp);
+		nw->destroy();
+		nUserPtr = nw->userPtr();
 	}
 	}
 
 
 	if (nUserPtr)
 	if (nUserPtr)
 		RR->node->configureVirtualNetworkPort(tptr,nwid,nUserPtr,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY,&ctmp);
 		RR->node->configureVirtualNetworkPort(tptr,nwid,nUserPtr,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY,&ctmp);
 
 
 	{
 	{
-		Mutex::Lock _l(_networks_m);
-		_networks.erase(nwid);
+		RWMutex::Lock _l(_networks_m);
+		_networks[(unsigned long)(nwidHashed & _networksMask)].zero();
 	}
 	}
 
 
 	uint64_t tmp[2];
 	uint64_t tmp[2];
@@ -433,11 +469,10 @@ ZT_PeerList *Node::peers() const
 
 
 ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
 ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
 {
 {
-	Mutex::Lock _l(_networks_m);
-	const SharedPtr<Network> *nw = _networks.get(nwid);
+	SharedPtr<Network> nw(network(nwid));
 	if (nw) {
 	if (nw) {
-		ZT_VirtualNetworkConfig *nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
-		(*nw)->externalConfig(nc);
+		ZT_VirtualNetworkConfig *const nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
+		nw->externalConfig(nc);
 		return nc;
 		return nc;
 	}
 	}
 	return (ZT_VirtualNetworkConfig *)0;
 	return (ZT_VirtualNetworkConfig *)0;
@@ -445,30 +480,34 @@ ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
 
 
 ZT_VirtualNetworkList *Node::networks() const
 ZT_VirtualNetworkList *Node::networks() const
 {
 {
-	Mutex::Lock _l(_networks_m);
+	RWMutex::RLock l(_networks_m);
 
 
-	char *buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * _networks.size()));
+	unsigned long networkCount = 0;
+	for(std::vector< SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i) {
+		if ((*i))
+			++networkCount;
+	}
+
+	char *const buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * networkCount));
 	if (!buf)
 	if (!buf)
 		return (ZT_VirtualNetworkList *)0;
 		return (ZT_VirtualNetworkList *)0;
 	ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf;
 	ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf;
 	nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));
 	nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));
 
 
 	nl->networkCount = 0;
 	nl->networkCount = 0;
-	Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(*const_cast< Hashtable< uint64_t,SharedPtr<Network> > *>(&_networks));
-	uint64_t *k = (uint64_t *)0;
-	SharedPtr<Network> *v = (SharedPtr<Network> *)0;
-	while (i.next(k,v))
-		(*v)->externalConfig(&(nl->networks[nl->networkCount++]));
+	for(std::vector< SharedPtr<Network> >::const_iterator i(_networks.begin());i!=_networks.end();++i) {
+		if ((*i))
+			(*i)->externalConfig(&(nl->networks[nl->networkCount++]));
+	}
 
 
 	return nl;
 	return nl;
 }
 }
 
 
 void Node::setNetworkUserPtr(uint64_t nwid,void *ptr)
 void Node::setNetworkUserPtr(uint64_t nwid,void *ptr)
 {
 {
-	Mutex::Lock _l(_networks_m);
-	const SharedPtr<Network> *const nw = _networks.get(nwid);
+	SharedPtr<Network> nw(network(nwid));
 	if (nw)
 	if (nw)
-		*((*nw)->userPtr()) = ptr;
+		*(nw->userPtr()) = ptr;
 }
 }
 
 
 void Node::freeQueryResult(void *qr)
 void Node::freeQueryResult(void *qr)
@@ -524,23 +563,20 @@ void Node::setController(void *networkControllerInstance)
 
 
 bool Node::shouldUsePathForZeroTierTraffic(void *tPtr,const Address &ztaddr,const int64_t localSocket,const InetAddress &remoteAddress)
 bool Node::shouldUsePathForZeroTierTraffic(void *tPtr,const Address &ztaddr,const int64_t localSocket,const InetAddress &remoteAddress)
 {
 {
-	if (!Path::isAddressValidForPath(remoteAddress))
-		return false;
-	{
-		Mutex::Lock _l(_networks_m);
-		Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(_networks);
-		uint64_t *k = (uint64_t *)0;
-		SharedPtr<Network> *v = (SharedPtr<Network> *)0;
-		while (i.next(k,v)) {
-			if ((*v)->hasConfig()) {
-				for(unsigned int k=0;k<(*v)->config().staticIpCount;++k) {
-					if ((*v)->config().staticIps[k].containsAddress(remoteAddress))
+	if (Path::isAddressValidForPath(remoteAddress)) {
+		RWMutex::RLock l(_networks_m);
+		for(std::vector< SharedPtr<Network> >::iterator i(_networks.begin());i!=_networks.end();++i) {
+			if ((*i)) {
+				for(unsigned int k=0,j=(*i)->config().staticIpCount;k<j;++k) {
+					if ((*i)->config().staticIps[k].containsAddress(remoteAddress))
 						return false;
 						return false;
 				}
 				}
 			}
 			}
 		}
 		}
+	} else {
+		return false;
 	}
 	}
-	return ( (_cb.pathCheckFunction) ? (_cb.pathCheckFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ztaddr.toInt(),localSocket,reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0) : true);
+	return ((_cb.pathCheckFunction) ? (_cb.pathCheckFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ztaddr.toInt(),localSocket,reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0) : true);
 }
 }
 
 
 bool Node::externalPathLookup(void *tPtr,const Identity &id,int family,InetAddress &addr)
 bool Node::externalPathLookup(void *tPtr,const Identity &id,int family,InetAddress &addr)
@@ -564,6 +600,16 @@ ZT_ResultCode Node::setPhysicalPathConfiguration(const struct sockaddr_storage *
 	return ZT_RESULT_OK;
 	return ZT_RESULT_OK;
 }
 }
 
 
+bool Node::localControllerHasAuthorized(const int64_t now,const uint64_t nwid,const Address &addr) const
+{
+	_localControllerAuthorizations_m.lock();
+	const int64_t *const at = _localControllerAuthorizations.get(_LocalControllerAuth(nwid,addr));
+	_localControllerAuthorizations_m.unlock();
+	if (at)
+		return ((now - *at) < (ZT_NETWORK_AUTOCONF_DELAY * 3));
+	return false;
+}
+
 void Node::ncSendConfig(uint64_t nwid,uint64_t requestPacketId,const Address &destination,const NetworkConfig &nc,bool sendLegacyFormatConfig)
 void Node::ncSendConfig(uint64_t nwid,uint64_t requestPacketId,const Address &destination,const NetworkConfig &nc,bool sendLegacyFormatConfig)
 {
 {
 	_localControllerAuthorizations_m.lock();
 	_localControllerAuthorizations_m.lock();
@@ -690,9 +736,10 @@ enum ZT_ResultCode ZT_Node_new(ZT_Node **node,void *uptr,void *tptr,const struct
 	}
 	}
 }
 }
 
 
-void ZT_Node_delete(ZT_Node *node)
+void ZT_Node_delete(ZT_Node *node,void *tPtr)
 {
 {
 	try {
 	try {
+		reinterpret_cast<ZeroTier::Node *>(node)->shutdown(tPtr);
 		delete (reinterpret_cast<ZeroTier::Node *>(node));
 		delete (reinterpret_cast<ZeroTier::Node *>(node));
 	} catch ( ... ) {}
 	} catch ( ... ) {}
 }
 }
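
The replacement for the old Hashtable-based network map is a flat, power-of-two-sized vector: a network's slot is derived from its ID by folding the high half into the low half and masking, and join() simply doubles the table until every joined network lands in a distinct slot. The lookup used by Node::network() is essentially:

    // Sketch of the slot computation; _networksMask is always (table size - 1).
    static unsigned long networkSlot(const uint64_t nwid, const uint64_t networksMask)
    {
        return (unsigned long)((nwid + (nwid >> 32U)) & networksMask);
    }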

+ 162 - 31
node/Node.hpp

@@ -51,9 +51,18 @@ class Locator;
 class Node : public NetworkController::Sender
 class Node : public NetworkController::Sender
 {
 {
 public:
 public:
-	Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, int64_t now);
+	Node(void *uPtr,void *tPtr,const struct ZT_Node_Callbacks *callbacks,int64_t now);
 	virtual ~Node();
 	virtual ~Node();
 
 
+	/**
+	 * Perform any operations that should be done prior to deleting a Node
+	 *
+	 * This is technically optional but recommended.
+	 *
+	 * @param tPtr Thread pointer to pass through to callbacks
+	 */
+	void shutdown(void *tPtr);
+
 	// Get rid of alignment warnings on 32-bit Windows and possibly improve performance
 	// Get rid of alignment warnings on 32-bit Windows and possibly improve performance
 #ifdef __WINDOWS__
 #ifdef __WINDOWS__
 	void * operator new(size_t i) { return _mm_malloc(i,16); }
 	void * operator new(size_t i) { return _mm_malloc(i,16); }
@@ -101,8 +110,22 @@ public:
 
 
 	// Internal functions ------------------------------------------------------
 	// Internal functions ------------------------------------------------------
 
 
+	/**
+	 * @return Most recent time value supplied to core via API
+	 */
 	ZT_ALWAYS_INLINE int64_t now() const { return _now; }
 	ZT_ALWAYS_INLINE int64_t now() const { return _now; }
 
 
+	/**
+	 * Send a packet to the physical wire via callback
+	 *
+	 * @param tPtr Thread pointer
+	 * @param localSocket Local socket or -1 to use all/any
+	 * @param addr Destination address
+	 * @param data Data to send
+	 * @param len Length in bytes
+	 * @param ttl TTL or 0 for default/max
+	 * @return True if send appears successful
+	 */
 	ZT_ALWAYS_INLINE bool putPacket(void *tPtr,const int64_t localSocket,const InetAddress &addr,const void *data,unsigned int len,unsigned int ttl = 0)
 	ZT_ALWAYS_INLINE bool putPacket(void *tPtr,const int64_t localSocket,const InetAddress &addr,const void *data,unsigned int len,unsigned int ttl = 0)
 	{
 	{
 		return (_cb.wirePacketSendFunction(
 		return (_cb.wirePacketSendFunction(
@@ -116,6 +139,19 @@ public:
 			ttl) == 0);
 			ttl) == 0);
 	}
 	}
 
 
+	/**
+	 * Inject frame into virtual Ethernet tap
+	 *
+	 * @param tPtr Thread pointer
+	 * @param nwid Network ID
+	 * @param nuptr Network-associated user pointer
+	 * @param source Source MAC address
+	 * @param dest Destination MAC address
+	 * @param etherType 16-bit Ethernet type
+	 * @param vlanId Ethernet VLAN ID (currently unused)
+	 * @param data Ethernet frame data
+	 * @param len Ethernet frame length in bytes
+	 */
 	ZT_ALWAYS_INLINE void putFrame(void *tPtr,uint64_t nwid,void **nuptr,const MAC &source,const MAC &dest,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
 	ZT_ALWAYS_INLINE void putFrame(void *tPtr,uint64_t nwid,void **nuptr,const MAC &source,const MAC &dest,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
 	{
 	{
 		_cb.virtualNetworkFrameFunction(
 		_cb.virtualNetworkFrameFunction(
@@ -132,30 +168,118 @@ public:
 			len);
 			len);
 	}
 	}
 
 
+	/**
+	 * @param nwid Network ID
+	 * @return Network associated with ID
+	 */
 	ZT_ALWAYS_INLINE SharedPtr<Network> network(uint64_t nwid) const
 	ZT_ALWAYS_INLINE SharedPtr<Network> network(uint64_t nwid) const
 	{
 	{
-		Mutex::Lock _l(_networks_m);
-		const SharedPtr<Network> *n = _networks.get(nwid);
-		if (n)
-			return *n;
-		return SharedPtr<Network>();
+		RWMutex::RLock l(_networks_m);
+		return _networks[(unsigned long)((nwid + (nwid >> 32U)) & _networksMask)];
 	}
 	}
 
 
-	ZT_ALWAYS_INLINE std::vector<ZT_InterfaceAddress> directPaths() const
+	/**
+	 * @return Known local interface addresses for this node
+	 */
+	ZT_ALWAYS_INLINE std::vector<ZT_InterfaceAddress> localInterfaceAddresses() const
 	{
 	{
 		Mutex::Lock _l(_localInterfaceAddresses_m);
 		Mutex::Lock _l(_localInterfaceAddresses_m);
 		return _localInterfaceAddresses;
 		return _localInterfaceAddresses;
 	}
 	}
 
 
+	/**
+	 * Post an event via external callback
+	 *
+	 * @param tPtr Thread pointer
+	 * @param ev Event object
+	 * @param md Event data or NULL if none
+	 */
 	ZT_ALWAYS_INLINE void postEvent(void *tPtr,ZT_Event ev,const void *md = (const void *)0) { _cb.eventCallback(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ev,md); }
 	ZT_ALWAYS_INLINE void postEvent(void *tPtr,ZT_Event ev,const void *md = (const void *)0) { _cb.eventCallback(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ev,md); }
+
+	/**
+	 * Post network port configuration via external callback
+	 *
+	 * @param tPtr Thread pointer
+	 * @param nwid Network ID
+	 * @param nuptr Network-associated user pointer
+	 * @param op Config operation or event type
+	 * @param nc Network config info
+	 */
 	ZT_ALWAYS_INLINE void configureVirtualNetworkPort(void *tPtr,uint64_t nwid,void **nuptr,ZT_VirtualNetworkConfigOperation op,const ZT_VirtualNetworkConfig *nc) { _cb.virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,nwid,nuptr,op,nc); }
 	ZT_ALWAYS_INLINE void configureVirtualNetworkPort(void *tPtr,uint64_t nwid,void **nuptr,ZT_VirtualNetworkConfigOperation op,const ZT_VirtualNetworkConfig *nc) { _cb.virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,nwid,nuptr,op,nc); }
+
+	/**
+	 * @return True if node appears online
+	 */
 	ZT_ALWAYS_INLINE bool online() const { return _online; }
 	ZT_ALWAYS_INLINE bool online() const { return _online; }
+
+	/**
+	 * Get a state object
+	 *
+	 * @param tPtr Thread pointer
+	 * @param type Object type to get
+	 * @param id Object ID
+	 * @param data Data buffer
+	 * @param maxlen Maximum data length
+	 * @return Number of bytes actually read or 0 if not found
+	 */
 	ZT_ALWAYS_INLINE int stateObjectGet(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2],void *const data,const unsigned int maxlen) { return _cb.stateGetFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,type,id,data,maxlen); }
 	ZT_ALWAYS_INLINE int stateObjectGet(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2],void *const data,const unsigned int maxlen) { return _cb.stateGetFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,type,id,data,maxlen); }
+
+	/**
+	 * Store a state object
+	 *
+	 * @param tPtr Thread pointer
+	 * @param type Object type to store
+	 * @param id Object ID
+	 * @param data Data to store
+	 * @param len Length of data
+	 */
 	ZT_ALWAYS_INLINE void stateObjectPut(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2],const void *const data,const unsigned int len) { _cb.statePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,type,id,data,(int)len); }
 	ZT_ALWAYS_INLINE void stateObjectPut(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2],const void *const data,const unsigned int len) { _cb.statePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,type,id,data,(int)len); }
+
+	/**
+	 * Delete a state object
+	 *
+	 * @param tPtr Thread pointer
+	 * @param type Object type to delete
+	 * @param id Object ID
+	 */
 	ZT_ALWAYS_INLINE void stateObjectDelete(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2]) { _cb.statePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,type,id,(const void *)0,-1); }
 	ZT_ALWAYS_INLINE void stateObjectDelete(void *const tPtr,ZT_StateObjectType type,const uint64_t id[2]) { _cb.statePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,type,id,(const void *)0,-1); }
+
+	/**
+	 * Check whether a path should be used for ZeroTier traffic
+	 *
+	 * This performs internal checks and also calls out to an external callback if one is defined.
+	 *
+	 * @param tPtr Thread pointer
+	 * @param ztaddr ZeroTier address
+	 * @param localSocket Local socket or -1 if unknown
+	 * @param remoteAddress Remote address
+	 * @return True if path should be used
+	 */
 	bool shouldUsePathForZeroTierTraffic(void *tPtr,const Address &ztaddr,const int64_t localSocket,const InetAddress &remoteAddress);
 	bool shouldUsePathForZeroTierTraffic(void *tPtr,const Address &ztaddr,const int64_t localSocket,const InetAddress &remoteAddress);
+
+	/**
+	 * Query callback for a physical address for a peer
+	 *
+	 * @param tPtr Thread pointer
+	 * @param id Full identity of ZeroTier node
+	 * @param family Desired address family or -1 for any
+	 * @param addr Buffer to store address (result parameter)
+	 * @return True if addr was filled with something
+	 */
 	bool externalPathLookup(void *tPtr,const Identity &id,int family,InetAddress &addr);
 	bool externalPathLookup(void *tPtr,const Identity &id,int family,InetAddress &addr);
+
+	/**
+	 * Set physical path configuration
+	 *
+	 * @param pathNetwork Physical path network/netmask bits (CIDR notation)
+	 * @param pathConfig Path configuration
+	 * @return Result code to pass through to the external API
+	 */
 	ZT_ResultCode setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig);
 	ZT_ResultCode setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig);
+
+	/**
+	 * @return This node's identity
+	 */
 	ZT_ALWAYS_INLINE const Identity &identity() const { return _RR.identity; }
 	ZT_ALWAYS_INLINE const Identity &identity() const { return _RR.identity; }
 
 
 	/**
 	/**
@@ -212,23 +336,26 @@ public:
 		return false;
 		return false;
 	}
 	}
 
 
+	/**
+	 * Check whether a local controller has authorized a member on a network
+	 *
+	 * This is used by controllers to avoid needless certificate checks when we already
+	 * know if this has occurred. It's a bit of a hack but saves a massive amount of
+	 * controller CPU. It's easiest to put this here, and it imposes no overhead on
+	 * non-controllers.
+	 *
+	 * @param now Current time
+	 * @param nwid Network ID
+	 * @param addr Member address to check
+	 * @return True if member has been authorized
+	 */
+	bool localControllerHasAuthorized(int64_t now,uint64_t nwid,const Address &addr) const;
+
+	// Implementation of NetworkController::Sender interface
 	virtual void ncSendConfig(uint64_t nwid,uint64_t requestPacketId,const Address &destination,const NetworkConfig &nc,bool sendLegacyFormatConfig);
 	virtual void ncSendConfig(uint64_t nwid,uint64_t requestPacketId,const Address &destination,const NetworkConfig &nc,bool sendLegacyFormatConfig);
 	virtual void ncSendRevocation(const Address &destination,const Revocation &rev);
 	virtual void ncSendRevocation(const Address &destination,const Revocation &rev);
 	virtual void ncSendError(uint64_t nwid,uint64_t requestPacketId,const Address &destination,NetworkController::ErrorCode errorCode);
 	virtual void ncSendError(uint64_t nwid,uint64_t requestPacketId,const Address &destination,NetworkController::ErrorCode errorCode);
 
 
-	inline bool localControllerHasAuthorized(const int64_t now,const uint64_t nwid,const Address &addr) const
-	{
-		_localControllerAuthorizations_m.lock();
-		const int64_t *const at = _localControllerAuthorizations.get(_LocalControllerAuth(nwid,addr));
-		_localControllerAuthorizations_m.unlock();
-		if (at)
-			return ((now - *at) < (ZT_NETWORK_AUTOCONF_DELAY * 3));
-		return false;
-	}
-
-	inline void setMultipathMode(uint8_t mode) { _multipathMode = mode; }
-	inline uint8_t getMultipathMode() { return _multipathMode; }
-
 private:
 private:
 	RuntimeEnvironment _RR;
 	RuntimeEnvironment _RR;
 	RuntimeEnvironment *RR;
 	RuntimeEnvironment *RR;
@@ -236,11 +363,11 @@ private:
 	void *_uPtr; // _uptr (lower case) is reserved in Visual Studio :P
 	void *_uPtr; // _uptr (lower case) is reserved in Visual Studio :P
 
 
 	// For tracking packet IDs to filter out OK/ERROR replies to packets we did not send
 	// For tracking packet IDs to filter out OK/ERROR replies to packets we did not send
-	uint8_t _expectingRepliesToBucketPtr[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1];
-	uint32_t _expectingRepliesTo[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1][ZT_EXPECTING_REPLIES_BUCKET_MASK2 + 1];
+	volatile uint8_t _expectingRepliesToBucketPtr[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1];
+	volatile uint32_t _expectingRepliesTo[ZT_EXPECTING_REPLIES_BUCKET_MASK1 + 1][ZT_EXPECTING_REPLIES_BUCKET_MASK2 + 1];
 
 
 	// Time of last identity verification indexed by InetAddress.rateGateHash() -- used in IncomingPacket::_doHELLO() via rateGateIdentityVerification()
 	// Time of last identity verification indexed by InetAddress.rateGateHash() -- used in IncomingPacket::_doHELLO() via rateGateIdentityVerification()
-	int64_t _lastIdentityVerification[16384];
+	volatile int64_t _lastIdentityVerification[16384];
 
 
 	/* Map that remembers if we have recently sent a network config to someone
 	/* Map that remembers if we have recently sent a network config to someone
 	 * querying us as a controller. This is an optimization to allow network
 	 * querying us as a controller. This is an optimization to allow network
@@ -256,21 +383,25 @@ private:
 		ZT_ALWAYS_INLINE bool operator!=(const _LocalControllerAuth &a) const { return ((a.nwid != nwid)||(a.address != address)); }
 		ZT_ALWAYS_INLINE bool operator!=(const _LocalControllerAuth &a) const { return ((a.nwid != nwid)||(a.address != address)); }
 	};
 	};
 	Hashtable< _LocalControllerAuth,int64_t > _localControllerAuthorizations;
 	Hashtable< _LocalControllerAuth,int64_t > _localControllerAuthorizations;
-	Hashtable< uint64_t,SharedPtr<Network> > _networks;
+
+	// Networks are stored in a flat hash table that is resized on any network ID collision. This makes
+	// network lookup by network ID a few bitwise ops and an array index.
+	std::vector< SharedPtr<Network> > _networks;
+	uint64_t _networksMask;
+
 	std::vector< ZT_InterfaceAddress > _localInterfaceAddresses;
 	std::vector< ZT_InterfaceAddress > _localInterfaceAddresses;
 
 
 	Mutex _localControllerAuthorizations_m;
 	Mutex _localControllerAuthorizations_m;
-	Mutex _networks_m;
+	RWMutex _networks_m;
 	Mutex _localInterfaceAddresses_m;
 	Mutex _localInterfaceAddresses_m;
 	Mutex _backgroundTasksLock;
 	Mutex _backgroundTasksLock;
 
 
-	uint8_t _multipathMode;
-
 	volatile int64_t _now;
 	volatile int64_t _now;
-	int64_t _lastPing;
-	int64_t _lastHousekeepingRun;
-	int64_t _lastNetworkHousekeepingRun;
-	bool _online;
+	volatile int64_t _lastPing;
+	volatile int64_t _lastHousekeepingRun;
+	volatile int64_t _lastNetworkHousekeepingRun;
+	volatile int64_t _lastPathKeepaliveCheck;
+	volatile bool _online;
 };
 };
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier

+ 9 - 8
node/OS.hpp

@@ -33,6 +33,15 @@
 #endif
 #endif
 #endif
 #endif
 
 
+#if !defined(__GCC__) && !defined (__clang__) && !defined(__INTEL_COMPILER)
+#define ZT_PACKED_STRUCT(D) __pragma(pack(push,1)) D __pragma(pack(pop))
+#pragma warning(disable : 4290)
+#pragma warning(disable : 4996)
+#pragma warning(disable : 4101)
+#else
+#define ZT_PACKED_STRUCT(D) D __attribute__((packed))
+#endif
+
 #if defined(_WIN32) || defined(_WIN64)
 #if defined(_WIN32) || defined(_WIN64)
 #ifndef __WINDOWS__
 #ifndef __WINDOWS__
 #define __WINDOWS__
 #define __WINDOWS__
@@ -42,14 +51,6 @@
 #endif
 #endif
 #undef __UNIX_LIKE__
 #undef __UNIX_LIKE__
 #undef __BSD__
 #undef __BSD__
-#if !defined(__GNUC__) && !defined (__clang__) && !defined(__INTEL_COMPILER)
-#define ZT_PACKED_STRUCT(D) __pragma(pack(push,1)) D __pragma(pack(pop))
-#pragma warning(disable : 4290)
-#pragma warning(disable : 4996)
-#pragma warning(disable : 4101)
-#else
-#define ZT_PACKED_STRUCT(D) D __attribute__((packed))
-#endif
 #include <WinSock2.h>
 #include <WinSock2.h>
 #include <Windows.h>
 #include <Windows.h>
 #endif
 #endif
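
Hoisting ZT_PACKED_STRUCT out of the Windows-only block lets packed on-wire structures be declared the same way on every platform; it expands to __attribute__((packed)) on GCC/Clang/ICC and to #pragma pack(push,1) / #pragma pack(pop) on other compilers (MSVC). A hypothetical declaration using it:

    ZT_PACKED_STRUCT(struct ExampleWireHeader {
        uint8_t verb;
        uint16_t payloadLength;
    });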

+ 8 - 17
node/Packet.hpp

@@ -237,17 +237,7 @@
 /**
 /**
  * Signed locator for this node
  * Signed locator for this node
  */
  */
-#define ZT_PROTO_NODE_META_LOCATOR "L"
-
-/**
- * Dictionary mapping identity hash to timestamp to request newer locators for other nodes if known
- */
-#define ZT_PROTO_NODE_META_REFRESH_LOCATORS_IF_NEWER "lt"
-
-/**
- * Dictionary mapping identity hash to locator to supply newer revisions of requested locators
- */
-#define ZT_PROTO_NODE_META_REFRESH_LOCATORS "lr"
+#define ZT_PROTO_NODE_META_LOCATOR "l"
 
 
 /**
 /**
  * Ephemeral C25519 public key
  * Ephemeral C25519 public key
@@ -523,16 +513,17 @@ public:
 		 *   [<[...] additional addresses to look up>
 		 *   [<[...] additional addresses to look up>
 		 *
 		 *
 		 * OK response payload:
 		 * OK response payload:
-		 *   <[...] binary serialized identity>
-		 *  [<[...] additional binary serialized identities>]
-		 *
-		 * If querying a cluster, duplicate OK responses may occasionally occur.
-		 * These must be tolerated, which is easy since they'll have info you
-		 * already have.
+		 *   <[...] identity>
+		 *   <[...] locator>
+		 *   [... additional identity/locator pairs]
 		 *
 		 *
 		 * If the address is not found, no response is generated. The semantics
 		 * If the address is not found, no response is generated. The semantics
 		 * of WHOIS is similar to ARP and NDP in that persistent retrying can
 		 * of WHOIS is similar to ARP and NDP in that persistent retrying can
 		 * be performed.
 		 * be performed.
+		 *
+		 * It is possible for an identity but a null/empty locator to be returned
+		 * if no locator is known for a node. Older versions will also send no
+		 * locator field at all.
 		 */
 		 */
 		VERB_WHOIS = 0x04,
 		VERB_WHOIS = 0x04,
 
 

+ 2 - 2
node/Path.hpp

@@ -97,14 +97,14 @@ public:
 	 *
 	 *
 	 * @param t Time of send
 	 * @param t Time of send
 	 */
 	 */
-	ZT_ALWAYS_INLINE void sent(const uint64_t t) { _lastOut = t; }
+	ZT_ALWAYS_INLINE void sent(const int64_t t) { _lastOut = t; }
 
 
 	/**
 	/**
 	 * Called when a packet is received from this remote path, regardless of content
 	 * Called when a packet is received from this remote path, regardless of content
 	 *
 	 *
 	 * @param t Time of receive
 	 * @param t Time of receive
 	 */
 	 */
-	ZT_ALWAYS_INLINE void received(const uint64_t t) { _lastIn = t; }
+	ZT_ALWAYS_INLINE void received(const int64_t t) { _lastIn = t; }
 
 
 	/**
 	/**
 	 * Check path aliveness
 	 * Check path aliveness

+ 204 - 97
node/Peer.cpp

@@ -21,6 +21,8 @@
 #include "Trace.hpp"
 #include "Trace.hpp"
 #include "InetAddress.hpp"
 #include "InetAddress.hpp"
 
 
+#include <set>
+
 namespace ZeroTier {
 namespace ZeroTier {
 
 
 struct _PathPriorityComparisonOperator
 struct _PathPriorityComparisonOperator
@@ -31,25 +33,28 @@ struct _PathPriorityComparisonOperator
 	}
 	}
 };
 };
 
 
-Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity) :
+Peer::Peer(const RuntimeEnvironment *renv) :
 	RR(renv),
 	RR(renv),
 	_lastReceive(0),
 	_lastReceive(0),
 	_lastWhoisRequestReceived(0),
 	_lastWhoisRequestReceived(0),
 	_lastEchoRequestReceived(0),
 	_lastEchoRequestReceived(0),
 	_lastPushDirectPathsReceived(0),
 	_lastPushDirectPathsReceived(0),
-	_lastPushDirectPathsSent(0),
+	_lastAttemptedP2PInit(0),
 	_lastTriedStaticPath(0),
 	_lastTriedStaticPath(0),
 	_lastPrioritizedPaths(0),
 	_lastPrioritizedPaths(0),
 	_latency(0xffff),
 	_latency(0xffff),
-	_alivePathCount(0),
-	_id(peerIdentity),
-	_vProto(0),
-	_vMajor(0),
-	_vMinor(0),
-	_vRevision(0)
+	_alivePathCount(0)
 {
 {
-	if (!myIdentity.agree(peerIdentity,_key))
-		throw ZT_EXCEPTION_INVALID_ARGUMENT;
+}
+
+bool Peer::init(const Identity &myIdentity,const Identity &peerIdentity)
+{
+	_id = peerIdentity;
+	_vProto = 0;
+	_vMajor = 0;
+	_vMinor = 0;
+	_vRevision = 0;
+	return myIdentity.agree(peerIdentity,_key);
 }
 }
 
 
 void Peer::received(
 void Peer::received(
@@ -67,17 +72,17 @@ void Peer::received(
 	_lastReceive = now;
 	_lastReceive = now;
 
 
 	if (hops == 0) {
 	if (hops == 0) {
-		_paths_l.rlock();
-		for(int i=0;i<(int)_alivePathCount; ++i) {
+		_lock.rlock();
+		for(int i=0;i<(int)_alivePathCount;++i) {
 			if (_paths[i] == path) {
 			if (_paths[i] == path) {
-				_paths_l.runlock();
+				_lock.runlock();
 				goto path_check_done;
 				goto path_check_done;
 			}
 			}
 		}
 		}
-		_paths_l.runlock();
+		_lock.runlock();
 
 
 		if (verb == Packet::VERB_OK) {
 		if (verb == Packet::VERB_OK) {
-			RWMutex::Lock l(_paths_l);
+			RWMutex::Lock l(_lock);
 
 
 			int64_t lastReceiveTimeMax = 0;
 			int64_t lastReceiveTimeMax = 0;
 			int lastReceiveTimeMaxAt = 0;
 			int lastReceiveTimeMaxAt = 0;
@@ -105,6 +110,7 @@ void Peer::received(
 
 
 			_lastPrioritizedPaths = now;
 			_lastPrioritizedPaths = now;
 			_paths[lastReceiveTimeMaxAt] = path;
 			_paths[lastReceiveTimeMaxAt] = path;
+			_bootstrap = path->address();
 			_prioritizePaths(now);
 			_prioritizePaths(now);
 			RR->t->peerLearnedNewPath(tPtr,networkId,*this,path,packetId);
 			RR->t->peerLearnedNewPath(tPtr,networkId,*this,path,packetId);
 		} else {
 		} else {
@@ -117,72 +123,84 @@ void Peer::received(
 	}
 	}
 
 
 path_check_done:
 path_check_done:
-	const int64_t sinceLastPush = now - _lastPushDirectPathsSent;
-	if (sinceLastPush >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)) {
-		_lastPushDirectPathsReceived = now;
-	}
-
-	/*
-	const int64_t sinceLastPush = now - _lastDirectPathPushSent;
-	if (sinceLastPush >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)) {
-		_lastDirectPathPushSent = now;
-		std::vector<ZT_InterfaceAddress> pathsToPush(RR->node->directPaths());
-		if (pathsToPush.size() > 0) {
-			std::vector<ZT_InterfaceAddress>::const_iterator p(pathsToPush.begin());
-			while (p != pathsToPush.end()) {
-				ScopedPtr<Packet> outp(new Packet(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS));
-				outp->addSize(2); // leave room for count
-				unsigned int count = 0;
-				while ((p != pathsToPush.end())&&((outp->size() + 24) < 1200)) {
-					uint8_t addressType = 4;
-					uint8_t addressLength = 6;
-					unsigned int ipLength = 4;
-					const void *rawIpData;
-					const void *rawIpPort;
-					switch(p->address.ss_family) {
-						case AF_INET:
-							rawIpData = &(reinterpret_cast<const struct sockaddr_in *>(&(p->address))->sin_addr.s_addr);
-							rawIpPort = &(reinterpret_cast<const struct sockaddr_in *>(&(p->address))->sin_port);
-							break;
-						case AF_INET6:
-							rawIpData = reinterpret_cast<const struct sockaddr_in6 *>(&(p->address))->sin6_addr.s6_addr;
-							rawIpPort = &(reinterpret_cast<const struct sockaddr_in6 *>(&(p->address))->sin6_port);
-							addressType = 6;
-							addressLength = 18;
-							ipLength = 16;
-							break;
-						default: // we currently only push IP addresses
-							++p;
-							continue;
-					}
+	const int64_t sinceLastP2PInit = now - _lastAttemptedP2PInit;
+	if (sinceLastP2PInit >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)) {
+		_lastAttemptedP2PInit = now;
+
+		InetAddress addr;
+		if (_bootstrap)
+			sendHELLO(tPtr,-1,_bootstrap,now);
+		if (RR->node->externalPathLookup(tPtr,_id,-1,addr)) {
+			if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id.address(),-1,addr))
+				sendHELLO(tPtr,-1,addr,now);
+		}
 
 
-					outp->append((uint8_t)0); // no flags
-					outp->append((uint16_t)0); // no extensions
-					outp->append(addressType);
-					outp->append(addressLength);
-					outp->append(rawIpData,ipLength);
-					outp->append(rawIpPort,2);
+		std::vector<ZT_InterfaceAddress> localInterfaceAddresses(RR->node->localInterfaceAddresses());
+		std::multimap<unsigned long,InetAddress> detectedAddresses(RR->sa->externalAddresses(now));
+		std::set<InetAddress> addrs;
+		for(std::vector<ZT_InterfaceAddress>::const_iterator i(localInterfaceAddresses.begin());i!=localInterfaceAddresses.end();++i)
+			addrs.insert(asInetAddress(i->address));
+		for(std::multimap<unsigned long,InetAddress>::const_reverse_iterator i(detectedAddresses.rbegin());i!=detectedAddresses.rend();++i) {
+			if (i->first <= 1)
+				break;
+			if (addrs.count(i->second) == 0) {
+				addrs.insert(i->second);
+				break;
+			}
+		}
 
 
-					++count;
-					++p;
-				}
-				if (count) {
-					outp->setAt(ZT_PACKET_IDX_PAYLOAD,(uint16_t)count);
-					outp->compress();
-					outp->armor(_key,true);
-					path->send(RR,tPtr,outp->data(),outp->size(),now);
+		if (!addrs.empty()) {
+			ScopedPtr<Packet> outp(new Packet(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS));
+			outp->addSize(2); // leave room for count
+			unsigned int count = 0;
+			for(std::set<InetAddress>::iterator a(addrs.begin());a!=addrs.end();++a) {
+				uint8_t addressType = 4;
+				uint8_t addressLength = 6;
+				unsigned int ipLength = 4;
+				const void *rawIpData = (const void *)0;
+				uint16_t port = 0;
+				switch(a->ss_family) {
+					case AF_INET:
+						rawIpData = &(reinterpret_cast<const sockaddr_in *>(&(*a))->sin_addr.s_addr);
+						port = Utils::ntoh((uint16_t)reinterpret_cast<const sockaddr_in *>(&(*a))->sin_port);
+						break;
+					case AF_INET6:
+						rawIpData = reinterpret_cast<const sockaddr_in6 *>(&(*a))->sin6_addr.s6_addr;
+						port = Utils::ntoh((uint16_t)reinterpret_cast<const sockaddr_in6 *>(&(*a))->sin6_port);
+						addressType = 6;
+						addressLength = 18;
+						ipLength = 16;
+						break;
+					default:
+						continue;
 				}
 				}
+
+				outp->append((uint8_t)0); // no flags
+				outp->append((uint16_t)0); // no extensions
+				outp->append(addressType);
+				outp->append(addressLength);
+				outp->append(rawIpData,ipLength);
+				outp->append(port);
+
+				++count;
+				if (outp->size() >= (ZT_PROTO_MAX_PACKET_LENGTH - 32))
+					break;
+			}
+			if (count > 0) {
+				outp->setAt(ZT_PACKET_IDX_PAYLOAD,(uint16_t)count);
+				outp->compress();
+				outp->armor(_key,true);
+				path->send(RR,tPtr,outp->data(),outp->size(),now);
 			}
 			}
 		}
 		}
 	}
 	}
-	*/
 }
 }
 
 
 bool Peer::shouldTryPath(void *tPtr,int64_t now,const SharedPtr<Peer> &suggestedBy,const InetAddress &addr) const
 bool Peer::shouldTryPath(void *tPtr,int64_t now,const SharedPtr<Peer> &suggestedBy,const InetAddress &addr) const
 {
 {
 	int maxHaveScope = -1;
 	int maxHaveScope = -1;
 	{
 	{
-		RWMutex::RLock l(_paths_l);
+		RWMutex::RLock l(_lock);
 		for (unsigned int i = 0; i < _alivePathCount; ++i) {
 		for (unsigned int i = 0; i < _alivePathCount; ++i) {
 			if (_paths[i]) {
 			if (_paths[i]) {
 				if (_paths[i]->address().ipsEqual2(addr))
 				if (_paths[i]->address().ipsEqual2(addr))
@@ -219,9 +237,9 @@ void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atA
 	}
 	}
 }
 }
 
 
-void Peer::ping(void *tPtr,int64_t now,unsigned int &v4SendCount,unsigned int &v6SendCount,const bool pingAllAddressTypes)
+void Peer::ping(void *tPtr,int64_t now,const bool pingAllAddressTypes)
 {
 {
-	RWMutex::RLock l(_paths_l);
+	RWMutex::RLock l(_lock);
 
 
 	_lastPrioritizedPaths = now;
 	_lastPrioritizedPaths = now;
 	_prioritizePaths(now);
 	_prioritizePaths(now);
@@ -230,30 +248,29 @@ void Peer::ping(void *tPtr,int64_t now,unsigned int &v4SendCount,unsigned int &v
 		for (unsigned int i = 0; i < _alivePathCount; ++i) {
 		for (unsigned int i = 0; i < _alivePathCount; ++i) {
 			sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
 			sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
 			_paths[i]->sent(now);
 			_paths[i]->sent(now);
-
-			if (_paths[i]->address().isV4())
-				++v4SendCount;
-			else if (_paths[i]->address().isV6())
-				++v6SendCount;
-
 			if (!pingAllAddressTypes)
 			if (!pingAllAddressTypes)
-				break;
+				return;
 		}
 		}
-	} else {
-		SharedPtr<Peer> r(RR->topology->root());
-		if (r) {
-			SharedPtr<Path> rp(r->path(now));
-			if (rp) {
-				sendHELLO(tPtr,rp->localSocket(),rp->address(),now);
-				rp->sent(now);
-			}
+		return;
+	}
+
+	if (_bootstrap)
+		sendHELLO(tPtr,-1,_bootstrap,now);
+
+	SharedPtr<Peer> r(RR->topology->root());
+	if (r) {
+		SharedPtr<Path> rp(r->path(now));
+		if (rp) {
+			sendHELLO(tPtr,rp->localSocket(),rp->address(),now);
+			rp->sent(now);
+			return;
 		}
 		}
 	}
 	}
 }
 }
 
 
 void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
 void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
 {
 {
-	RWMutex::RLock l(_paths_l);
+	RWMutex::RLock l(_lock);
 	for(unsigned int i=0; i < _alivePathCount; ++i) {
 	for(unsigned int i=0; i < _alivePathCount; ++i) {
 		if ((_paths[i])&&((_paths[i]->address().ss_family == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope))) {
 		if ((_paths[i])&&((_paths[i]->address().ss_family == inetAddressFamily)&&(_paths[i]->address().ipScope() == scope))) {
 			sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
 			sendHELLO(tPtr,_paths[i]->localSocket(),_paths[i]->address(),now);
@@ -278,23 +295,23 @@ bool Peer::sendDirect(void *tPtr,const void *data,const unsigned int len,const i
 {
 {
 	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
 	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
 		_lastPrioritizedPaths = now;
 		_lastPrioritizedPaths = now;
-		_paths_l.lock();
+		_lock.lock();
 		_prioritizePaths(now);
 		_prioritizePaths(now);
 		if (_alivePathCount == 0) {
 		if (_alivePathCount == 0) {
-			_paths_l.unlock();
+			_lock.unlock();
 			return false;
 			return false;
 		}
 		}
 		const bool r = _paths[0]->send(RR,tPtr,data,len,now);
 		const bool r = _paths[0]->send(RR,tPtr,data,len,now);
-		_paths_l.unlock();
+		_lock.unlock();
 		return r;
 		return r;
 	} else {
 	} else {
-		_paths_l.rlock();
+		_lock.rlock();
 		if (_alivePathCount == 0) {
 		if (_alivePathCount == 0) {
-			_paths_l.runlock();
+			_lock.runlock();
 			return false;
 			return false;
 		}
 		}
 		const bool r = _paths[0]->send(RR,tPtr,data,len,now);
 		const bool r = _paths[0]->send(RR,tPtr,data,len,now);
-		_paths_l.runlock();
+		_lock.runlock();
 		return r;
 		return r;
 	}
 	}
 }
 }
@@ -303,13 +320,13 @@ SharedPtr<Path> Peer::path(const int64_t now)
 {
 {
 	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
 	if ((now - _lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
 		_lastPrioritizedPaths = now;
 		_lastPrioritizedPaths = now;
-		RWMutex::Lock l(_paths_l);
+		RWMutex::Lock l(_lock);
 		_prioritizePaths(now);
 		_prioritizePaths(now);
 		if (_alivePathCount == 0)
 		if (_alivePathCount == 0)
 			return SharedPtr<Path>();
 			return SharedPtr<Path>();
 		return _paths[0];
 		return _paths[0];
 	} else {
 	} else {
-		RWMutex::RLock l(_paths_l);
+		RWMutex::RLock l(_lock);
 		if (_alivePathCount == 0)
 		if (_alivePathCount == 0)
 			return SharedPtr<Path>();
 			return SharedPtr<Path>();
 		return _paths[0];
 		return _paths[0];
@@ -318,14 +335,104 @@ SharedPtr<Path> Peer::path(const int64_t now)
 
 
 void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
 void Peer::getAllPaths(std::vector< SharedPtr<Path> > &paths)
 {
 {
-	RWMutex::RLock l(_paths_l);
+	RWMutex::RLock l(_lock);
 	paths.clear();
 	paths.clear();
 	paths.assign(_paths,_paths + _alivePathCount);
 	paths.assign(_paths,_paths + _alivePathCount);
 }
 }
 
 
+void Peer::save(void *tPtr) const
+{
+	uint8_t *const buf = (uint8_t *)malloc(ZT_PEER_MARSHAL_SIZE_MAX);
+	if (!buf) return;
+
+	// marshal() acquires the read lock itself, so no separate locking is needed here.
+	const int len = marshal(buf);
+
+	if (len > 0) {
+		uint64_t id[2];
+		id[0] = _id.address().toInt();
+		id[1] = 0;
+		RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER,id,buf,(unsigned int)len);
+	}
+
+	free(buf);
+}
+
+int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const
+{
+	RWMutex::RLock l(_lock);
+
+	data[0] = 0; // serialized peer version
+
+	int s = _id.marshal(data + 1,false);
+	if (s <= 0)
+		return s;
+	int p = 1 + s;
+	s = _locator.marshal(data + p);
+	if (s <= 0)
+		return s;
+	p += s;
+	s = _bootstrap.marshal(data + p);
+	if (s <= 0)
+		return s;
+	p += s;
+
+	Utils::storeBigEndian(data + p,(uint16_t)_vProto);
+	p += 2;
+	Utils::storeBigEndian(data + p,(uint16_t)_vMajor);
+	p += 2;
+	Utils::storeBigEndian(data + p,(uint16_t)_vMinor);
+	p += 2;
+	Utils::storeBigEndian(data + p,(uint16_t)_vRevision);
+	p += 2;
+
+	data[p++] = 0;
+	data[p++] = 0;
+
+	return p;
+}
+
+int Peer::unmarshal(const uint8_t *restrict data,const int len)
+{
+	RWMutex::Lock l(_lock);
+
+	if ((len <= 1)||(data[0] != 0))
+		return -1;
+
+	int s = _id.unmarshal(data + 1,len - 1);
+	if (s <= 0)
+		return s;
+	int p = 1 + s;
+	s = _locator.unmarshal(data + p,len - p);
+	if (s <= 0)
+		return s;
+	p += s;
+	s = _bootstrap.unmarshal(data + p,len - p);
+	if (s <= 0)
+		return s;
+	p += s;
+
+	if ((p + 10) > len)
+		return -1;
+	_vProto = Utils::loadBigEndian<uint16_t>(data + p);
+	p += 2;
+	_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
+	p += 2;
+	_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
+	p += 2;
+	_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
+	p += 2;
+	p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
+	if (p > len)
+		return -1;
+
+	return p;
+}
+
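For reference, the record produced by marshal() above is laid out as: a one-byte format version (0), the marshaled identity, locator, and bootstrap address, four big-endian 16-bit version fields (protocol, major, minor, revision), and a big-endian 16-bit length of any additional fields (currently zero). A small standalone sketch of the bounds-checked skip that unmarshal() performs over that trailing section (the helper name skipTrailingFields is illustrative, not part of the ZeroTier API):

#include <cstdint>

// Illustrative helper: skip a big-endian 16-bit length followed by that many bytes,
// as unmarshal() does for the "additional fields" section at the end of a peer record.
// Returns the new offset, or -1 if the record is truncated.
static int skipTrailingFields(const uint8_t *data, int len, int p)
{
	if ((p + 2) > len)
		return -1;
	const unsigned int fieldLen = ((unsigned int)data[p] << 8) | (unsigned int)data[p + 1];
	p += 2 + (int)fieldLen;
	return (p > len) ? -1 : p;
}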
 void Peer::_prioritizePaths(const int64_t now)
 void Peer::_prioritizePaths(const int64_t now)
 {
 {
-	// assumes _paths_l is locked for writing
+	// assumes _lock is locked for writing
 	std::sort(_paths,_paths + ZT_MAX_PEER_NETWORK_PATHS,_PathPriorityComparisonOperator());
 	std::sort(_paths,_paths + ZT_MAX_PEER_NETWORK_PATHS,_PathPriorityComparisonOperator());
 
 
 	for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
 	for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {

+ 49 - 33
node/Peer.hpp

@@ -27,33 +27,49 @@
 #include "AtomicCounter.hpp"
 #include "AtomicCounter.hpp"
 #include "Hashtable.hpp"
 #include "Hashtable.hpp"
 #include "Mutex.hpp"
 #include "Mutex.hpp"
+#include "Locator.hpp"
 
 
 #include <vector>
 #include <vector>
 
 
+// version, identity, locator, bootstrap, version info, length of any additional fields
+#define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + ZT_INETADDRESS_MARSHAL_SIZE_MAX + (2*4) + 2)
+
 namespace ZeroTier {
 namespace ZeroTier {
 
 
+class Topology;
+
 /**
 /**
  * Peer on P2P Network (virtual layer 1)
  * Peer on P2P Network (virtual layer 1)
  */
  */
 class Peer
 class Peer
 {
 {
 	friend class SharedPtr<Peer>;
 	friend class SharedPtr<Peer>;
+	friend class Topology;
 
 
 private:
 private:
 	ZT_ALWAYS_INLINE Peer() {}
 	ZT_ALWAYS_INLINE Peer() {}
 
 
 public:
 public:
+	/**
+	 * Create an uninitialized peer
+	 *
+	 * The peer will need to be initialized with init() or unmarshal() before
+	 * it can be used.
+	 *
+	 * @param renv Runtime environment
+	 */
+	Peer(const RuntimeEnvironment *renv);
+
 	ZT_ALWAYS_INLINE ~Peer() { Utils::burn(_key,sizeof(_key)); }
 	ZT_ALWAYS_INLINE ~Peer() { Utils::burn(_key,sizeof(_key)); }
 
 
 	/**
 	/**
-	 * Construct a new peer
+	 * Initialize peer with an identity
 	 *
 	 *
-	 * @param renv Runtime environment
-	 * @param myIdentity Identity of THIS node (for key agreement)
-	 * @param peerIdentity Identity of peer
-	 * @throws std::runtime_error Key agreement with peer's identity failed
+	 * @param myIdentity This node's identity including secret key
+	 * @param peerIdentity The peer's identity
+	 * @return True if initialization was successful
 	 */
 	 */
-	Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity);
+	bool init(const Identity &myIdentity,const Identity &peerIdentity);
 
 
 	/**
 	/**
 	 * @return This peer's ZT address (short for identity().address())
 	 * @return This peer's ZT address (short for identity().address())
@@ -118,11 +134,9 @@ public:
 	 *
 	 *
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param now Current time
 	 * @param now Current time
-	 * @param v4SendCount Number of IPv4 packets sent (result parameter)
-	 * @param v6SendCount Number of IPv6 packets sent (result parameter)
 	 * @param pingAllAddressTypes If true, try to keep a link up for each address type/family
 	 * @param pingAllAddressTypes If true, try to keep a link up for each address type/family
 	 */
 	 */
-	void ping(void *tPtr,int64_t now,unsigned int &v4SendCount,unsigned int &v6SendCount,bool pingAllAddressTypes);
+	void ping(void *tPtr,int64_t now,bool pingAllAddressTypes);
 
 
 	/**
 	/**
 	 * Reset paths within a given IP scope and address family
 	 * Reset paths within a given IP scope and address family
@@ -139,6 +153,15 @@ public:
 	 */
 	 */
 	void resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now);
 	void resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now);
 
 
+	/**
+	 * Update peer latency information
+	 *
+	 * This is called from packet parsing code.
+	 *
+	 * @param l New latency measurement (in milliseconds)
+	 */
+	void updateLatency(unsigned int l);
+
 	/**
 	/**
 	 * @return Time of last receive of anything, whether direct or relayed
 	 * @return Time of last receive of anything, whether direct or relayed
 	 */
 	 */
@@ -154,15 +177,6 @@ public:
 	 */
 	 */
 	ZT_ALWAYS_INLINE unsigned int latency() const { return _latency; }
 	ZT_ALWAYS_INLINE unsigned int latency() const { return _latency; }
 
 
-	/**
-	 * Update peer latency information
-	 *
-	 * This is called from packet parsing code.
-	 *
-	 * @param l New latency measurment (in milliseconds)
-	 */
-	void updateLatency(const unsigned int l);
-
 	/**
 	/**
 	 * @return 256-bit secret symmetric encryption key
 	 * @return 256-bit secret symmetric encryption key
 	 */
 	 */
@@ -226,18 +240,6 @@ public:
 		return false;
 		return false;
 	}
 	}
 
 
-	/**
-	 * Rate limit gate for trying externally defined or static path
-	 */
-	ZT_ALWAYS_INLINE bool rateGateTryStaticPath(const int64_t now)
-	{
-		if ((now - _lastTriedStaticPath) >= ZT_PEER_PING_PERIOD) {
-			_lastTriedStaticPath = now;
-			return true;
-		}
-		return false;
-	}
-
 	/**
 	/**
 	 * Send directly if a direct path exists
 	 * Send directly if a direct path exists
 	 *
 	 *
@@ -247,7 +249,7 @@ public:
 	 * @param now Current time
 	 * @param now Current time
 	 * @return True if packet appears to have been sent, false if no path or send failed
 	 * @return True if packet appears to have been sent, false if no path or send failed
 	 */
 	 */
-	bool sendDirect(void *tPtr,const void *data,unsigned int len,const int64_t now);
+	bool sendDirect(void *tPtr,const void *data,unsigned int len,int64_t now);
 
 
 	/**
 	/**
 	 * @return Current best path
 	 * @return Current best path
@@ -261,6 +263,17 @@ public:
 	 */
 	 */
 	void getAllPaths(std::vector< SharedPtr<Path> > &paths);
 	void getAllPaths(std::vector< SharedPtr<Path> > &paths);
 
 
+	/**
+	 * Save the latest version of this peer to the data store
+	 */
+	void save(void *tPtr) const;
+
+	// NOTE: peer marshal/unmarshal only saves/restores the identity, locator, most
+	// recent bootstrap address, and version information.
+	static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_PEER_MARSHAL_SIZE_MAX; }
+	int marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const;
+	int unmarshal(const uint8_t *restrict data,int len);
+
 private:
 private:
 	void _prioritizePaths(int64_t now);
 	void _prioritizePaths(int64_t now);
 
 
@@ -272,18 +285,21 @@ private:
 	volatile int64_t _lastWhoisRequestReceived;
 	volatile int64_t _lastWhoisRequestReceived;
 	volatile int64_t _lastEchoRequestReceived;
 	volatile int64_t _lastEchoRequestReceived;
 	volatile int64_t _lastPushDirectPathsReceived;
 	volatile int64_t _lastPushDirectPathsReceived;
-	volatile int64_t _lastPushDirectPathsSent;
+	volatile int64_t _lastAttemptedP2PInit;
 	volatile int64_t _lastTriedStaticPath;
 	volatile int64_t _lastTriedStaticPath;
 	volatile int64_t _lastPrioritizedPaths;
 	volatile int64_t _lastPrioritizedPaths;
 	volatile unsigned int _latency;
 	volatile unsigned int _latency;
 
 
 	AtomicCounter __refCount;
 	AtomicCounter __refCount;
 
 
+	RWMutex _lock; // locks _alivePathCount, _paths, _locator, and _bootstrap.
+
 	unsigned int _alivePathCount;
 	unsigned int _alivePathCount;
 	SharedPtr<Path> _paths[ZT_MAX_PEER_NETWORK_PATHS];
 	SharedPtr<Path> _paths[ZT_MAX_PEER_NETWORK_PATHS];
-	RWMutex _paths_l;
 
 
 	Identity _id;
 	Identity _id;
+	Locator _locator;
+	InetAddress _bootstrap;
 
 
 	uint16_t _vProto;
 	uint16_t _vProto;
 	uint16_t _vMajor;
 	uint16_t _vMajor;

+ 34 - 12
node/SelfAwareness.cpp

@@ -19,9 +19,7 @@
 #include "Constants.hpp"
 #include "Constants.hpp"
 #include "SelfAwareness.hpp"
 #include "SelfAwareness.hpp"
 #include "RuntimeEnvironment.hpp"
 #include "RuntimeEnvironment.hpp"
-#include "Node.hpp"
 #include "Topology.hpp"
 #include "Topology.hpp"
-#include "Packet.hpp"
 #include "Peer.hpp"
 #include "Peer.hpp"
 #include "Switch.hpp"
 #include "Switch.hpp"
 #include "Trace.hpp"
 #include "Trace.hpp"
@@ -34,20 +32,16 @@ namespace ZeroTier {
 class _ResetWithinScope
 class _ResetWithinScope
 {
 {
 public:
 public:
-	inline _ResetWithinScope(void *tPtr,int64_t now,int inetAddressFamily,InetAddress::IpScope scope) :
+	ZT_ALWAYS_INLINE _ResetWithinScope(void *tPtr,int64_t now,int inetAddressFamily,InetAddress::IpScope scope) :
 		_now(now),
 		_now(now),
 		_tPtr(tPtr),
 		_tPtr(tPtr),
 		_family(inetAddressFamily),
 		_family(inetAddressFamily),
 		_scope(scope) {}
 		_scope(scope) {}
 
 
-	inline bool operator()(const SharedPtr<Peer> &p)
-	{
-		p->resetWithinScope(_tPtr,_scope,_family,_now);
-		return true;
-	}
+	ZT_ALWAYS_INLINE void operator()(const SharedPtr<Peer> &p) { p->resetWithinScope(_tPtr,_scope,_family,_now); }
 
 
 private:
 private:
-	uint64_t _now;
+	int64_t _now;
 	void *_tPtr;
 	void *_tPtr;
 	int _family;
 	int _family;
 	InetAddress::IpScope _scope;
 	InetAddress::IpScope _scope;
@@ -55,7 +49,13 @@ private:
 
 
 SelfAwareness::SelfAwareness(const RuntimeEnvironment *renv) :
 SelfAwareness::SelfAwareness(const RuntimeEnvironment *renv) :
 	RR(renv),
 	RR(renv),
-	_phy(128) {}
+	_phy(256)
+{
+}
+
+SelfAwareness::~SelfAwareness()
+{
+}
 
 
 void SelfAwareness::iam(void *tPtr,const Address &reporter,const int64_t receivedOnLocalSocket,const InetAddress &reporterPhysicalAddress,const InetAddress &myPhysicalAddress,bool trusted,int64_t now)
 void SelfAwareness::iam(void *tPtr,const Address &reporter,const int64_t receivedOnLocalSocket,const InetAddress &reporterPhysicalAddress,const InetAddress &myPhysicalAddress,bool trusted,int64_t now)
 {
 {
@@ -64,7 +64,7 @@ void SelfAwareness::iam(void *tPtr,const Address &reporter,const int64_t receive
 	if ((scope != reporterPhysicalAddress.ipScope())||(scope == InetAddress::IP_SCOPE_NONE)||(scope == InetAddress::IP_SCOPE_LOOPBACK)||(scope == InetAddress::IP_SCOPE_MULTICAST))
 	if ((scope != reporterPhysicalAddress.ipScope())||(scope == InetAddress::IP_SCOPE_NONE)||(scope == InetAddress::IP_SCOPE_LOOPBACK)||(scope == InetAddress::IP_SCOPE_MULTICAST))
 		return;
 		return;
 
 
-	Mutex::Lock _l(_phy_m);
+	Mutex::Lock l(_phy_l);
 	PhySurfaceEntry &entry = _phy[PhySurfaceKey(reporter,receivedOnLocalSocket,reporterPhysicalAddress,scope)];
 	PhySurfaceEntry &entry = _phy[PhySurfaceKey(reporter,receivedOnLocalSocket,reporterPhysicalAddress,scope)];
 
 
 	if ( (trusted) && ((now - entry.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT) && (!entry.mySurface.ipsEqual(myPhysicalAddress)) ) {
 	if ( (trusted) && ((now - entry.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT) && (!entry.mySurface.ipsEqual(myPhysicalAddress)) ) {
@@ -101,7 +101,7 @@ void SelfAwareness::iam(void *tPtr,const Address &reporter,const int64_t receive
 
 
 void SelfAwareness::clean(int64_t now)
 void SelfAwareness::clean(int64_t now)
 {
 {
-	Mutex::Lock _l(_phy_m);
+	Mutex::Lock l(_phy_l);
 	Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
 	Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
 	PhySurfaceKey *k = (PhySurfaceKey *)0;
 	PhySurfaceKey *k = (PhySurfaceKey *)0;
 	PhySurfaceEntry *e = (PhySurfaceEntry *)0;
 	PhySurfaceEntry *e = (PhySurfaceEntry *)0;
@@ -111,4 +111,26 @@ void SelfAwareness::clean(int64_t now)
 	}
 	}
 }
 }
 
 
+std::multimap<unsigned long,InetAddress> SelfAwareness::externalAddresses(const int64_t now) const
+{
+	Hashtable<InetAddress,unsigned long> counts;
+	{
+		Mutex::Lock l(_phy_l);
+		Hashtable<PhySurfaceKey,PhySurfaceEntry>::Iterator i(const_cast<SelfAwareness *>(this)->_phy);
+		PhySurfaceKey *k = (PhySurfaceKey *)0;
+		PhySurfaceEntry *e = (PhySurfaceEntry *)0;
+		while (i.next(k,e)) {
+			if ((now - e->ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)
+				++counts[e->mySurface];
+		}
+	}
+	std::multimap<unsigned long,InetAddress> r;
+	Hashtable<InetAddress,unsigned long>::Iterator i(counts);
+	InetAddress *k = (InetAddress *)0;
+	unsigned long *c = (unsigned long *)0;
+	while (i.next(k,c))
+		r.insert(std::pair<unsigned long,InetAddress>(*c,*k));
+	return r;
+}
+
 } // namespace ZeroTier
 } // namespace ZeroTier
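externalAddresses() returns a multimap keyed by how many distinct reporters agreed on each surface address, so iterating it in reverse yields the consensus ("mode") addresses first; the PUSH_DIRECT_PATHS code in Peer.cpp above consumes it exactly this way. A standalone sketch of that consumption pattern, with std::string standing in for InetAddress:

#include <cstdio>
#include <map>
#include <string>

int main()
{
	// count -> address, as returned by SelfAwareness::externalAddresses()
	// (std::string used here in place of InetAddress for illustration)
	std::multimap<unsigned long, std::string> detected;
	detected.insert(std::pair<unsigned long, std::string>(3, "203.0.113.7/9993"));
	detected.insert(std::pair<unsigned long, std::string>(1, "198.51.100.2/9993"));

	// Walk from the most-agreed-upon address down, ignoring addresses that only
	// a single peer reported (the same threshold the Peer.cpp loop applies).
	for (std::multimap<unsigned long, std::string>::const_reverse_iterator i(detected.rbegin()); i != detected.rend(); ++i) {
		if (i->first <= 1)
			break;
		std::printf("consensus external address: %s (reported by %lu peers)\n", i->second.c_str(), i->first);
	}
	return 0;
}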

+ 22 - 12
node/SelfAwareness.hpp

@@ -20,6 +20,8 @@
 #include "Address.hpp"
 #include "Address.hpp"
 #include "Mutex.hpp"
 #include "Mutex.hpp"
 
 
+#include <map>
+
 namespace ZeroTier {
 namespace ZeroTier {
 
 
 class RuntimeEnvironment;
 class RuntimeEnvironment;
@@ -30,10 +32,11 @@ class RuntimeEnvironment;
 class SelfAwareness
 class SelfAwareness
 {
 {
 public:
 public:
-	SelfAwareness(const RuntimeEnvironment *renv);
+	explicit SelfAwareness(const RuntimeEnvironment *renv);
+	~SelfAwareness();
 
 
 	/**
 	/**
-	 * Called when a trusted remote peer informs us of our external network address
+	 * Called when a remote peer informs us of our external network address
 	 *
 	 *
 	 * @param reporter ZeroTier address of reporting peer
 	 * @param reporter ZeroTier address of reporting peer
 	 * @param receivedOnLocalAddress Local address on which report was received
 	 * @param receivedOnLocalAddress Local address on which report was received
@@ -42,7 +45,7 @@ public:
 	 * @param trusted True if this peer is trusted as an authority to inform us of external address changes
 	 * @param trusted True if this peer is trusted as an authority to inform us of external address changes
 	 * @param now Current time
 	 * @param now Current time
 	 */
 	 */
-	void iam(void *tPtr,const Address &reporter,const int64_t receivedOnLocalSocket,const InetAddress &reporterPhysicalAddress,const InetAddress &myPhysicalAddress,bool trusted,int64_t now);
+	void iam(void *tPtr,const Address &reporter,int64_t receivedOnLocalSocket,const InetAddress &reporterPhysicalAddress,const InetAddress &myPhysicalAddress,bool trusted,int64_t now);
 
 
 	/**
 	/**
 	 * Clean up database periodically
 	 * Clean up database periodically
@@ -51,6 +54,14 @@ public:
 	 */
 	 */
 	void clean(int64_t now);
 	void clean(int64_t now);
 
 
+	/**
+	 * Get external address consensus, which is the statistical "mode" of external addresses.
+	 *
+	 * @param now Current time
+	 * @return Map of count to IP/port representing how many endpoints reported each address
+	 */
+	std::multimap<unsigned long,InetAddress> externalAddresses(int64_t now) const;
+
 private:
 private:
 	struct PhySurfaceKey
 	struct PhySurfaceKey
 	{
 	{
@@ -59,13 +70,13 @@ private:
 		InetAddress reporterPhysicalAddress;
 		InetAddress reporterPhysicalAddress;
 		InetAddress::IpScope scope;
 		InetAddress::IpScope scope;
 
 
-		inline PhySurfaceKey() : reporter(),scope(InetAddress::IP_SCOPE_NONE) {}
-		inline PhySurfaceKey(const Address &r,const int64_t rol,const InetAddress &ra,InetAddress::IpScope s) : reporter(r),receivedOnLocalSocket(rol),reporterPhysicalAddress(ra),scope(s) {}
+		ZT_ALWAYS_INLINE PhySurfaceKey() {}
+		ZT_ALWAYS_INLINE PhySurfaceKey(const Address &r,const int64_t rol,const InetAddress &ra,InetAddress::IpScope s) : reporter(r),receivedOnLocalSocket(rol),reporterPhysicalAddress(ra),scope(s) {}
 
 
-		inline unsigned long hashCode() const { return ((unsigned long)reporter.toInt() + (unsigned long)scope); }
+		ZT_ALWAYS_INLINE unsigned long hashCode() const { return ((unsigned long)reporter.toInt() + (unsigned long)receivedOnLocalSocket + (unsigned long)scope); }
 
 
-		inline bool operator==(const PhySurfaceKey &k) const { return ((reporter == k.reporter)&&(receivedOnLocalSocket == k.receivedOnLocalSocket)&&(reporterPhysicalAddress == k.reporterPhysicalAddress)&&(scope == k.scope)); }
-		inline bool operator!=(const PhySurfaceKey &k) const { return (!(*this == k)); }
+		ZT_ALWAYS_INLINE bool operator==(const PhySurfaceKey &k) const { return ((reporter == k.reporter)&&(receivedOnLocalSocket == k.receivedOnLocalSocket)&&(reporterPhysicalAddress == k.reporterPhysicalAddress)&&(scope == k.scope)); }
+		ZT_ALWAYS_INLINE bool operator!=(const PhySurfaceKey &k) const { return (!(*this == k)); }
 	};
 	};
 	struct PhySurfaceEntry
 	struct PhySurfaceEntry
 	{
 	{
@@ -73,14 +84,13 @@ private:
 		uint64_t ts;
 		uint64_t ts;
 		bool trusted;
 		bool trusted;
 
 
-		inline PhySurfaceEntry() : mySurface(),ts(0),trusted(false) {}
-		inline PhySurfaceEntry(const InetAddress &a,const uint64_t t) : mySurface(a),ts(t),trusted(false) {}
+		ZT_ALWAYS_INLINE PhySurfaceEntry() : mySurface(),ts(0),trusted(false) {}
+		ZT_ALWAYS_INLINE PhySurfaceEntry(const InetAddress &a,const uint64_t t) : mySurface(a),ts(t),trusted(false) {}
 	};
 	};
 
 
 	const RuntimeEnvironment *RR;
 	const RuntimeEnvironment *RR;
-
 	Hashtable< PhySurfaceKey,PhySurfaceEntry > _phy;
 	Hashtable< PhySurfaceKey,PhySurfaceEntry > _phy;
-	Mutex _phy_m;
+	Mutex _phy_l;
 };
 };
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier

+ 4 - 18
node/Switch.cpp

@@ -579,34 +579,20 @@ unsigned long Switch::doTimerTasks(void *tPtr,int64_t now)
 
 
 bool Switch::_trySend(void *tPtr,Packet &packet,bool encrypt)
 bool Switch::_trySend(void *tPtr,Packet &packet,bool encrypt)
 {
 {
-	SharedPtr<Path> viaPath;
 	const int64_t now = RR->node->now();
 	const int64_t now = RR->node->now();
-	const Address destination(packet.destination());
-
-	const SharedPtr<Peer> peer(RR->topology->get(destination));
+	const SharedPtr<Peer> peer(RR->topology->get(packet.destination()));
+	SharedPtr<Path> viaPath;
 	if (peer) {
 	if (peer) {
 		viaPath = peer->path(now);
 		viaPath = peer->path(now);
 		if (!viaPath) {
 		if (!viaPath) {
-			if (peer->rateGateTryStaticPath(now)) {
-				InetAddress tryAddr;
-				bool gotPath = RR->node->externalPathLookup(tPtr,peer->identity(),AF_INET6,tryAddr);
-				if ((gotPath)&&(tryAddr)) {
-					peer->sendHELLO(tPtr,-1,tryAddr,now);
-				} else {
-					gotPath = RR->node->externalPathLookup(tPtr,peer->identity(),AF_INET,tryAddr);
-					if ((gotPath)&&(tryAddr))
-						peer->sendHELLO(tPtr,-1,tryAddr,now);
-				}
-			}
-
 			const SharedPtr<Peer> relay(RR->topology->root());
 			const SharedPtr<Peer> relay(RR->topology->root());
 			if (relay) {
 			if (relay) {
 				viaPath = relay->path(now);
 				viaPath = relay->path(now);
 				if (!viaPath)
 				if (!viaPath)
 					return false;
 					return false;
+			} else {
+				return false;
 			}
 			}
-
-			return false;
 		}
 		}
 	} else {
 	} else {
 		return false;
 		return false;

+ 38 - 4
node/Topology.cpp

@@ -44,6 +44,19 @@ Topology::~Topology()
 {
 {
 }
 }
 
 
+SharedPtr<Peer> Topology::add(void *tPtr,const SharedPtr<Peer> &peer)
+{
+	RWMutex::Lock _l(_peers_l);
+	SharedPtr<Peer> &hp = _peers[peer->address()];
+	if (hp)
+		return hp;
+	_loadCached(tPtr,peer->address(),hp);
+	if (hp)
+		return hp;
+	hp = peer;
+	return peer;
+}
+
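Topology::add() above follows an insert-or-return-existing pattern: operator[] on the hashtable creates an empty slot, the on-disk cache is consulted, and only if both are empty does the supplied peer take the slot; the caller is expected to adopt whatever comes back. A standalone sketch of the same idiom with standard containers (Record and addOrGetExisting are illustrative names, not ZeroTier APIs):

#include <cstdint>
#include <map>
#include <memory>

struct Record { uint64_t address; };

// Illustrative helper: insert 'rec' unless a record with the same address already
// exists; either way, return the record that actually lives in the table.
static std::shared_ptr<Record> addOrGetExisting(std::map<uint64_t, std::shared_ptr<Record> > &table,
                                                const std::shared_ptr<Record> &rec)
{
	std::shared_ptr<Record> &slot = table[rec->address];
	if (!slot)
		slot = rec;   // new entry: adopt the supplied record
	return slot;      // existing or newly inserted record
}

This is why the header documents the return value as "New or existing peer (should replace 'peer')": the caller swaps its own reference for whatever add() hands back.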
 void Topology::getAllPeers(std::vector< SharedPtr<Peer> > &allPeers) const
 void Topology::getAllPeers(std::vector< SharedPtr<Peer> > &allPeers) const
 {
 {
 	RWMutex::RLock l(_peers_l);
 	RWMutex::RLock l(_peers_l);
@@ -97,8 +110,10 @@ void Topology::addRoot(const Identity &id)
 	std::pair< std::set<Identity>::iterator,bool > ir(_roots.insert(id));
 	std::pair< std::set<Identity>::iterator,bool > ir(_roots.insert(id));
 	if (ir.second) {
 	if (ir.second) {
 		SharedPtr<Peer> &p = _peers[id.address()];
 		SharedPtr<Peer> &p = _peers[id.address()];
-		if (!p)
-			p.set(new Peer(RR,_myIdentity,id));
+		if (!p) {
+			p.set(new Peer(RR));
+			p->init(_myIdentity,id);
+		}
 		_rootPeers.push_back(p);
 		_rootPeers.push_back(p);
 	}
 	}
 }
 }
@@ -126,7 +141,7 @@ void Topology::rankRoots(const int64_t now)
 	std::sort(_rootPeers.begin(),_rootPeers.end(),_RootSortComparisonOperator(now));
 	std::sort(_rootPeers.begin(),_rootPeers.end(),_RootSortComparisonOperator(now));
 }
 }
 
 
-void Topology::doPeriodicTasks(const int64_t now)
+void Topology::doPeriodicTasks(void *tPtr,const int64_t now)
 {
 {
 	{
 	{
 		RWMutex::Lock l1(_peers_l);
 		RWMutex::Lock l1(_peers_l);
@@ -134,8 +149,10 @@ void Topology::doPeriodicTasks(const int64_t now)
 		Address *a = (Address *)0;
 		Address *a = (Address *)0;
 		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
 		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
 		while (i.next(a,p)) {
 		while (i.next(a,p)) {
-			if ( (!(*p)->alive(now)) && (_roots.count((*p)->identity()) == 0) )
+			if ( (!(*p)->alive(now)) && (_roots.count((*p)->identity()) == 0) ) {
+				(*p)->save(tPtr);
 				_peers.erase(*a);
 				_peers.erase(*a);
+			}
 		}
 		}
 	}
 	}
 	{
 	{
@@ -150,4 +167,21 @@ void Topology::doPeriodicTasks(const int64_t now)
 	}
 	}
 }
 }
 
 
+void Topology::saveAll(void *tPtr)
+{
+	RWMutex::RLock l(_peers_l);
+	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
+	Address *a = (Address *)0;
+	SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+	while (i.next(a,p))
+		(*p)->save(tPtr);
+}
+
+void Topology::_loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer)
+{
+}
+
 } // namespace ZeroTier
 } // namespace ZeroTier

+ 61 - 41
node/Topology.hpp

@@ -49,7 +49,7 @@ public:
 	~Topology();
 	~Topology();
 
 
 	/**
 	/**
-	 * Add a peer to database
+	 * Add peer to database
 	 *
 	 *
 	 * This will not replace existing peers. In that case the existing peer
 	 * This will not replace existing peers. In that case the existing peer
 	 * record is returned.
 	 * record is returned.
@@ -57,14 +57,7 @@ public:
 	 * @param peer Peer to add
 	 * @param peer Peer to add
 	 * @return New or existing peer (should replace 'peer')
 	 * @return New or existing peer (should replace 'peer')
 	 */
 	 */
-	ZT_ALWAYS_INLINE SharedPtr<Peer> add(const SharedPtr<Peer> &peer)
-	{
-		RWMutex::Lock _l(_peers_l);
-		SharedPtr<Peer> &hp = _peers[peer->address()];
-		if (!hp)
-			hp = peer;
-		return hp;
-	}
+	SharedPtr<Peer> add(void *tPtr,const SharedPtr<Peer> &peer);
 
 
 	/**
 	/**
 	 * Get a peer from its address
 	 * Get a peer from its address
@@ -73,29 +66,25 @@ public:
 	 * @param zta ZeroTier address of peer
 	 * @param zta ZeroTier address of peer
 	 * @return Peer or NULL if not found
 	 * @return Peer or NULL if not found
 	 */
 	 */
-	ZT_ALWAYS_INLINE SharedPtr<Peer> get(const Address &zta) const
+	ZT_ALWAYS_INLINE SharedPtr<Peer> get(void *tPtr,const Address &zta)
 	{
 	{
-		RWMutex::RLock l1(_peers_l);
-		const SharedPtr<Peer> *const ap = _peers.get(zta);
-		return (ap) ? *ap : SharedPtr<Peer>();
-	}
-
-	/**
-	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
-	 * @param zta ZeroTier address of peer
-	 * @return Identity or NULL identity if not found
-	 */
-	ZT_ALWAYS_INLINE Identity getIdentity(void *tPtr,const Address &zta) const
-	{
-		if (zta == _myIdentity.address()) {
-			return _myIdentity;
-		} else {
+		{
 			RWMutex::RLock _l(_peers_l);
 			RWMutex::RLock _l(_peers_l);
 			const SharedPtr<Peer> *const ap = _peers.get(zta);
 			const SharedPtr<Peer> *const ap = _peers.get(zta);
 			if (ap)
 			if (ap)
-				return (*ap)->identity();
+				return *ap;
+		}
+
+		SharedPtr<Peer> p;
+		_loadCached(tPtr,zta,p);
+		if (p) {
+			RWMutex::Lock _l(_peers_l);
+			SharedPtr<Peer> &hp = _peers[zta];
+			if (!hp)
+				hp = p;
 		}
 		}
-		return Identity();
+
+		return p;
 	}
 	}
 
 
 	/**
 	/**
@@ -171,8 +160,7 @@ public:
 		Address *a = (Address *)0;
 		Address *a = (Address *)0;
 		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
 		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
 		while (i.next(a,p)) {
 		while (i.next(a,p)) {
-			if (!f(*((const SharedPtr<Peer> *)p)))
-				break;
+			f(*((const SharedPtr<Peer> *)p));
 		}
 		}
 	}
 	}
 
 
@@ -190,17 +178,42 @@ public:
 	{
 	{
 		RWMutex::RLock l(_peers_l);
 		RWMutex::RLock l(_peers_l);
 
 
-		std::vector<uintptr_t> rootPeerPtrs;
-		for(std::vector< SharedPtr<Peer> >::const_iterator i(_rootPeers.begin());i!=_rootPeers.end();++i)
-			rootPeerPtrs.push_back((uintptr_t)i->ptr());
-		std::sort(rootPeerPtrs.begin(),rootPeerPtrs.end());
+		const unsigned long rootPeerCnt = _rootPeers.size();
+		uintptr_t *const rootPeerPtrs = (uintptr_t *)malloc(sizeof(uintptr_t) * rootPeerCnt);
+		if (!rootPeerPtrs)
+			throw std::bad_alloc();
+		for(unsigned long i=0;i<rootPeerCnt;++i)
+			rootPeerPtrs[i] = (uintptr_t)_rootPeers[i].ptr();
+		std::sort(rootPeerPtrs,rootPeerPtrs + rootPeerCnt);
+		uintptr_t *const rootPeerPtrsEnd = rootPeerPtrs + rootPeerCnt;
+
+		try {
+			Hashtable< Address,SharedPtr<Peer> >::Iterator i(const_cast<Topology *>(this)->_peers);
+			Address *a = (Address *)0;
+			SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+			while (i.next(a,p)) {
+				f(*((const SharedPtr<Peer> *)p),std::binary_search(rootPeerPtrs,rootPeerPtrsEnd,(uintptr_t)p->ptr()));
+			}
+		} catch ( ... ) {} // should not throw
+
+		free((void *)rootPeerPtrs);
+	}
 
 
-		Hashtable< Address,SharedPtr<Peer> >::Iterator i(const_cast<Topology *>(this)->_peers);
-		Address *a = (Address *)0;
-		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
-		while (i.next(a,p)) {
-			if (!f(*((const SharedPtr<Peer> *)p),std::binary_search(rootPeerPtrs.begin(),rootPeerPtrs.end(),(uintptr_t)p->ptr())))
-				break;
+	/**
+	 * Iterate through all paths in the system
+	 *
+	 * @tparam F Function or function object type
+	 * @param f Function to call with each path
+	 */
+	template<typename F>
+	ZT_ALWAYS_INLINE void eachPath(F f) const
+	{
+		RWMutex::RLock l(_paths_l);
+		Hashtable< Path::HashKey,SharedPtr<Path> >::Iterator i(const_cast<Topology *>(this)->_paths);
+		Path::HashKey *k = (Path::HashKey *)0;
+		SharedPtr<Path> *p = (SharedPtr<Path> *)0;
+		while (i.next(k,p)) {
+			f(*((const SharedPtr<Path> *)p));
 		}
 		}
 	}
 	}
 
 
@@ -284,14 +297,21 @@ public:
 	 *
 	 *
 	 * @param now Current time
 	 * @param now Current time
 	 */
 	 */
-	void rankRoots(const int64_t now);
+	void rankRoots(int64_t now);
 
 
 	/**
 	/**
 	 * Do periodic tasks such as database cleanup
 	 * Do periodic tasks such as database cleanup
 	 */
 	 */
-	void doPeriodicTasks(const int64_t now);
+	void doPeriodicTasks(void *tPtr,int64_t now);
+
+	/**
+	 * Save all currently known peers to data store
+	 */
+	void saveAll(void *tPtr);
 
 
 private:
 private:
+	void _loadCached(void *tPtr,const Address &zta,SharedPtr<Peer> &peer);
+
 	const RuntimeEnvironment *const RR;
 	const RuntimeEnvironment *const RR;
 	const Identity _myIdentity;
 	const Identity _myIdentity;
 
 

+ 53 - 23
node/Utils.hpp

@@ -380,41 +380,71 @@ template<typename T>
 static ZT_ALWAYS_INLINE T ntoh(T n) { return n; }
 static ZT_ALWAYS_INLINE T ntoh(T n) { return n; }
 #endif
 #endif
 
 
-static ZT_ALWAYS_INLINE uint64_t readUInt64(const void *const p)
+template<typename I>
+static ZT_ALWAYS_INLINE I loadBigEndian(const void *const p)
 {
 {
 #ifdef ZT_NO_TYPE_PUNNING
 #ifdef ZT_NO_TYPE_PUNNING
-	const uint8_t *const b = reinterpret_cast<const uint8_t *>(p);
-	return (
-		((uint64_t)b[0] << 56) |
-		((uint64_t)b[1] << 48) |
-		((uint64_t)b[2] << 40) |
-		((uint64_t)b[3] << 32) |
-		((uint64_t)b[4] << 24) |
-		((uint64_t)b[5] << 16) |
-		((uint64_t)b[6] << 8) |
-		(uint64_t)b[7]);
+	I x = (I)0;
+	for(unsigned int k=0;k<sizeof(I);++k) {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+		reinterpret_cast<uint8_t *>(&x)[k] = reinterpret_cast<const uint8_t *>(p)[(sizeof(I)-1)-k];
+#else
+		reinterpret_cast<uint8_t *>(&x)[k] = reinterpret_cast<const uint8_t *>(p)[k];
+#endif
+	}
+	return x;
 #else
 #else
-	return ntoh(*reinterpret_cast<const uint64_t *>(p));
+	return ntoh(*reinterpret_cast<const I *>(p));
 #endif
 #endif
 }
 }
 
 
-static ZT_ALWAYS_INLINE void putUInt64(void *const p,const uint64_t i)
+template<typename I>
+static ZT_ALWAYS_INLINE void storeBigEndian(void *const p,const I i)
 {
 {
 #ifdef ZT_NO_TYPE_PUNNING
 #ifdef ZT_NO_TYPE_PUNNING
-	uint8_t *const b = reinterpret_cast<uint8_t *>(p);
-	p[0] = (uint8_t)(i << 56);
-	p[1] = (uint8_t)(i << 48);
-	p[2] = (uint8_t)(i << 40);
-	p[3] = (uint8_t)(i << 32);
-	p[4] = (uint8_t)(i << 24);
-	p[5] = (uint8_t)(i << 16);
-	p[6] = (uint8_t)(i << 8);
-	p[7] = (uint8_t)i;
+	for(unsigned int k=0;k<sizeof(I);++k) {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+		reinterpret_cast<uint8_t *>(p)[k] = reinterpret_cast<const uint8_t *>(&i)[(sizeof(I)-1)-k];
 #else
 #else
-	*reinterpret_cast<uint64_t *>(p) = Utils::hton(i);
+		reinterpret_cast<uint8_t *>(p)[k] = reinterpret_cast<const uint8_t *>(&i)[k];
+#endif
+	}
+#else
+	*reinterpret_cast<I *>(p) = Utils::hton(i);
 #endif
 #endif
 }
 }
 
 
+#if 0
+template<typename T>
+static ZT_ALWAYS_INLINE bool isPrimitiveType() { return false; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<void *>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<const void *>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<bool>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<float>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<double>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<int8_t>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<int16_t>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<int32_t>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<int64_t>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<uint8_t>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<uint16_t>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<uint32_t>() { return true; }
+template<>
+ZT_ALWAYS_INLINE bool isPrimitiveType<uint64_t>() { return true; }
+#endif
+
 } // namespace Utils
 } // namespace Utils
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier
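The new loadBigEndian/storeBigEndian templates replace the fixed-width readUInt64/putUInt64 helpers and are what Peer::marshal()/unmarshal() use for the 16-bit version fields. A standalone round-trip sketch of the byte order they produce (a portable shift-based version of the same idea; store16be/load16be are illustrative names, not the ZeroTier API):

#include <cassert>
#include <cstdint>

// Store/load a 16-bit value in big-endian (network) byte order, matching the layout
// written by Utils::storeBigEndian<uint16_t> and read by Utils::loadBigEndian<uint16_t>.
static void store16be(uint8_t *p, uint16_t v) { p[0] = (uint8_t)(v >> 8); p[1] = (uint8_t)v; }
static uint16_t load16be(const uint8_t *p) { return (uint16_t)(((uint16_t)p[0] << 8) | (uint16_t)p[1]); }

int main()
{
	uint8_t buf[2];
	store16be(buf, 0x1234);
	assert(buf[0] == 0x12 && buf[1] == 0x34); // most significant byte first on the wire
	assert(load16be(buf) == 0x1234);          // round trip back to host order
	return 0;
}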