
Implement continuous contacting of designated anchors and multicast replicators - GitHub issue #666

Adam Ierymenko 7 years ago
commit 4419734a7d
7 changed files with 87 additions and 79 deletions
  1. node/IncomingPacket.cpp (+1 -1)
  2. node/NetworkConfig.hpp (+13 -4)
  3. node/Node.cpp (+56 -48)
  4. node/RuntimeEnvironment.hpp (+9 -18)
  5. node/Switch.cpp (+2 -2)
  6. node/Topology.cpp (+4 -4)
  7. node/Topology.hpp (+2 -2)

+ 1 - 1
node/IncomingPacket.cpp

@@ -514,7 +514,7 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,void *tPtr,const SharedP
 
 bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,void *tPtr,const SharedPtr<Peer> &peer)
 {
-	if ((!RR->topology->amRoot())&&(!peer->rateGateInboundWhoisRequest(RR->node->now())))
+	if ((!RR->topology->amUpstream())&&(!peer->rateGateInboundWhoisRequest(RR->node->now())))
 		return true;
 
 	Packet outp(peer->address(),RR->identity.address(),Packet::VERB_OK);

+ 13 - 4
node/NetworkConfig.hpp

@@ -47,6 +47,7 @@
 #include "Capability.hpp"
 #include "Tag.hpp"
 #include "Dictionary.hpp"
+#include "Hashtable.hpp"
 #include "Identity.hpp"
 #include "Utils.hpp"
 #include "Trace.hpp"
@@ -317,6 +318,18 @@ public:
 		return r;
 	}
 
+	/**
+	 * Add addresses that we should attempt to stay connected to to a set
+	 */
+	inline void getAlwaysContactAddresses(Hashtable< Address,std::vector<InetAddress> > &a) const
+	{
+		for(unsigned int i=0;i<specialistCount;++i) {
+			if ((specialists[i] & (ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR | ZT_NETWORKCONFIG_SPECIALIST_TYPE_MULTICAST_REPLICATOR)) != 0) {
+				a[Address(specialists[i])];
+			}
+		}
+	}
+
 	/**
 	 * @param fromPeer Peer attempting to bridge other Ethernet peers onto network
 	 * @return True if this network allows bridging
@@ -332,11 +345,7 @@
 		return false;
 	}
 
-	/**
-	 * @return True if this network config is non-NULL
-	 */
 	inline operator bool() const { return (networkId != 0); }
-
 	inline bool operator==(const NetworkConfig &nc) const { return (memcmp(this,&nc,sizeof(NetworkConfig)) == 0); }
 	inline bool operator!=(const NetworkConfig &nc) const { return (!(*this == nc)); }
 

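A note on the new getAlwaysContactAddresses() helper added above: each specialists[] entry is a 64-bit value that packs a peer's 40-bit ZeroTier address in its low bits and role flags (anchor, multicast replicator, etc.) in the bits above, and the bare a[Address(specialists[i])]; expression relies on Hashtable's operator[] to default-construct an empty endpoint vector for that address. Below is a minimal standalone sketch of the same collect-by-flag pattern, not ZeroTier code: it substitutes std::map and int for Hashtable and InetAddress, and the flag bit values are invented for illustration.

// Standalone sketch only. The flag bit values here are hypothetical; ZeroTier defines
// the real ones (ZT_NETWORKCONFIG_SPECIALIST_TYPE_*) in NetworkConfig.hpp.
#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

static const uint64_t FLAG_ANCHOR               = 0x0000040000000000ULL; // hypothetical bit
static const uint64_t FLAG_MULTICAST_REPLICATOR = 0x0000080000000000ULL; // hypothetical bit
static const uint64_t ADDRESS_MASK              = 0x000000ffffffffffULL; // low 40 bits = address

// Mirrors getAlwaysContactAddresses(): make sure an (initially empty) endpoint list
// exists for every specialist flagged as an anchor or a multicast replicator.
static void collectAlwaysContact(const uint64_t *specialists,unsigned int count,
                                 std::map< uint64_t,std::vector<int> > &alwaysContact)
{
	for(unsigned int i=0;i<count;++i) {
		if ((specialists[i] & (FLAG_ANCHOR | FLAG_MULTICAST_REPLICATOR)) != 0)
			alwaysContact[specialists[i] & ADDRESS_MASK]; // operator[] default-constructs the vector
	}
}

int main()
{
	const uint64_t specialists[2] = {
		FLAG_ANCHOR | 0x1122334455ULL, // designated anchor at address 1122334455
		0x99aabbccddULL                // specialist with no relevant flags, ignored
	};
	std::map< uint64_t,std::vector<int> > alwaysContact;
	collectAlwaysContact(specialists,2,alwaysContact);
	std::printf("%lu address(es) to keep contacting\n",(unsigned long)alwaysContact.size()); // prints 1
	return 0;
}

Node.cpp (next file) then merges these per-network addresses into the same set as the designated VL1 upstreams before pinging.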
+ 56 - 48
node/Node.cpp

@@ -176,31 +176,25 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
 class _PingPeersThatNeedPing
 {
 public:
-	_PingPeersThatNeedPing(const RuntimeEnvironment *renv,void *tPtr,Hashtable< Address,std::vector<InetAddress> > &upstreamsToContact,int64_t now) :
-		lastReceiveFromUpstream(0),
+	_PingPeersThatNeedPing(const RuntimeEnvironment *renv,void *tPtr,Hashtable< Address,std::vector<InetAddress> > &alwaysContact,int64_t now) :
 		RR(renv),
 		_tPtr(tPtr),
-		_upstreamsToContact(upstreamsToContact),
+		_alwaysContact(alwaysContact),
 		_now(now),
 		_bestCurrentUpstream(RR->topology->getUpstreamPeer())
 	{
 	}
 
-	int64_t lastReceiveFromUpstream; // tracks last time we got a packet from an 'upstream' peer like a root or a relay
-
 	inline void operator()(Topology &t,const SharedPtr<Peer> &p)
 	{
-		const std::vector<InetAddress> *const upstreamStableEndpoints = _upstreamsToContact.get(p->address());
-		if (upstreamStableEndpoints) {
-			// Upstreams must be pinged constantly over both IPv4 and IPv6 to allow
-			// them to perform three way handshake introductions for both stacks.
-
+		const std::vector<InetAddress> *const alwaysContactEndpoints = _alwaysContact.get(p->address());
+		if (alwaysContactEndpoints) {
 			const unsigned int sent = p->doPingAndKeepalive(_tPtr,_now);
 			bool contacted = (sent != 0);
 
 			if ((sent & 0x1) == 0) { // bit 0x1 == IPv4 sent
-				for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)upstreamStableEndpoints->size();++k) {
-					const InetAddress &addr = (*upstreamStableEndpoints)[ptr++ % upstreamStableEndpoints->size()];
+				for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)alwaysContactEndpoints->size();++k) {
+					const InetAddress &addr = (*alwaysContactEndpoints)[ptr++ % alwaysContactEndpoints->size()];
 					if (addr.ss_family == AF_INET) {
 						p->sendHELLO(_tPtr,-1,addr,_now);
 						contacted = true;
@@ -210,8 +204,8 @@ public:
 			}
 
 			if ((sent & 0x2) == 0) { // bit 0x2 == IPv6 sent
-				for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)upstreamStableEndpoints->size();++k) {
-					const InetAddress &addr = (*upstreamStableEndpoints)[ptr++ % upstreamStableEndpoints->size()];
+				for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)alwaysContactEndpoints->size();++k) {
+					const InetAddress &addr = (*alwaysContactEndpoints)[ptr++ % alwaysContactEndpoints->size()];
 					if (addr.ss_family == AF_INET6) {
 						p->sendHELLO(_tPtr,-1,addr,_now);
 						contacted = true;
@@ -220,19 +214,14 @@ public:
 				}
 			}
 
-			// If we have no memoized addresses for this upstream peer, attempt to contact
-			// it indirectly so we will be introduced.
 			if ((!contacted)&&(_bestCurrentUpstream)) {
 				const SharedPtr<Path> up(_bestCurrentUpstream->getBestPath(_now,true));
 				if (up)
 					p->sendHELLO(_tPtr,up->localSocket(),up->address(),_now);
 			}
 
-			lastReceiveFromUpstream = std::max(p->lastReceive(),lastReceiveFromUpstream);
-
-			_upstreamsToContact.erase(p->address()); // after this we'll WHOIS all upstreams that remain
+			_alwaysContact.erase(p->address()); // after this we'll WHOIS all upstreams that remain
 		} else if (p->isActive(_now)) {
-			// Regular non-upstream nodes get pinged if they appear active.
 			p->doPingAndKeepalive(_tPtr,_now);
 		}
 	}
@@ -240,7 +229,7 @@ public:
 private:
 	const RuntimeEnvironment *RR;
 	void *_tPtr;
-	Hashtable< Address,std::vector<InetAddress> > &_upstreamsToContact;
+	Hashtable< Address,std::vector<InetAddress> > &_alwaysContact;
 	const int64_t _now;
 	const SharedPtr<Peer> _bestCurrentUpstream;
 };
@@ -256,41 +245,60 @@ ZT_ResultCode Node::processBackgroundTasks(void *tptr,int64_t now,volatile int64
 		try {
 			_lastPingCheck = now;
 
-			// Do pings and keepalives
-			Hashtable< Address,std::vector<InetAddress> > upstreamsToContact;
-			RR->topology->getUpstreamsToContact(upstreamsToContact);
-			_PingPeersThatNeedPing pfunc(RR,tptr,upstreamsToContact,now);
-			RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);
-
-			// Run WHOIS to create Peer for any upstreams we could not contact (including pending moon seeds)
-			Hashtable< Address,std::vector<InetAddress> >::Iterator i(upstreamsToContact);
-			Address *upstreamAddress = (Address *)0;
-			std::vector<InetAddress> *upstreamStableEndpoints = (std::vector<InetAddress> *)0;
-			while (i.next(upstreamAddress,upstreamStableEndpoints))
-				RR->sw->requestWhois(tptr,now,*upstreamAddress);
+			// Get designated VL1 upstreams
+			Hashtable< Address,std::vector<InetAddress> > alwaysContact;
+			RR->topology->getUpstreamsToContact(alwaysContact);
 
-			// Get networks that need config without leaving mutex locked
+			// Check last receive time on designated upstreams to see if we seem to be online
+			int64_t lastReceivedFromUpstream = 0;
 			{
-				std::vector< std::pair< SharedPtr<Network>,bool > > nwl;
-				{
-					Mutex::Lock _l(_networks_m);
-					nwl.reserve(_networks.size()+1);
-					Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(_networks);
-					uint64_t *k = (uint64_t *)0;
-					SharedPtr<Network> *v = (SharedPtr<Network> *)0;
-					while (i.next(k,v))
-						nwl.push_back( std::pair< SharedPtr<Network>,bool >(*v,(((now - (*v)->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!(*v)->hasConfig()))) );
+				Hashtable< Address,std::vector<InetAddress> >::Iterator i(alwaysContact);
+				Address *upstreamAddress = (Address *)0;
+				std::vector<InetAddress> *upstreamStableEndpoints = (std::vector<InetAddress> *)0;
+				while (i.next(upstreamAddress,upstreamStableEndpoints)) {
+					SharedPtr<Peer> p(RR->topology->getPeerNoCache(*upstreamAddress));
+					if (p)
+						lastReceivedFromUpstream = std::max(p->lastReceive(),lastReceivedFromUpstream);
 				}
-				for(std::vector< std::pair< SharedPtr<Network>,bool > >::const_iterator n(nwl.begin());n!=nwl.end();++n) {
-					if (n->second)
-						n->first->requestConfiguration(tptr);
-					n->first->sendUpdatesToMembers(tptr);
+			}
+
+			// Get peers we should stay connected to according to network configs
+			// Also get networks and whether they need config
+			std::vector< std::pair< SharedPtr<Network>,bool > > networkConfigNeeded;
+			{
+				Mutex::Lock l(_networks_m);
+				Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(_networks);
+				uint64_t *nwid = (uint64_t *)0;
+				SharedPtr<Network> *network = (SharedPtr<Network> *)0;
+				while (i.next(nwid,network)) {
+					(*network)->config().getAlwaysContactAddresses(alwaysContact);
+					networkConfigNeeded.push_back( std::pair< SharedPtr<Network>,bool >(*network,(((now - (*network)->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!(*network)->hasConfig()))) );
 				}
 			}
 
+			// Ping active peers, upstreams, and others that we should always contact
+			_PingPeersThatNeedPing pfunc(RR,tptr,alwaysContact,now);
+			RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);
+
+			// Run WHOIS to create Peer for alwaysContact addresses that could not be contacted
+			{
+				Hashtable< Address,std::vector<InetAddress> >::Iterator i(alwaysContact);
+				Address *upstreamAddress = (Address *)0;
+				std::vector<InetAddress> *upstreamStableEndpoints = (std::vector<InetAddress> *)0;
+				while (i.next(upstreamAddress,upstreamStableEndpoints))
+					RR->sw->requestWhois(tptr,now,*upstreamAddress);
+			}
+
+			// Refresh network config or broadcast network updates to members as needed
+			for(std::vector< std::pair< SharedPtr<Network>,bool > >::const_iterator n(networkConfigNeeded.begin());n!=networkConfigNeeded.end();++n) {
+				if (n->second)
+					n->first->requestConfiguration(tptr);
+				n->first->sendUpdatesToMembers(tptr);
+			}
+
 			// Update online status, post status change as event
 			const bool oldOnline = _online;
-			_online = (((now - pfunc.lastReceiveFromUpstream) < ZT_PEER_ACTIVITY_TIMEOUT)||(RR->topology->amRoot()));
+			_online = (((now - lastReceivedFromUpstream) < ZT_PEER_ACTIVITY_TIMEOUT)||(RR->topology->amUpstream()));
 			if (oldOnline != _online)
 				postEvent(tptr,_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
 		} catch ( ... ) {

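One detail of the ping functor above worth spelling out: doPingAndKeepalive() returns a small bitmask of which address families it actually reached (bit 0x1 = IPv4, bit 0x2 = IPv6), and for each family it missed, the functor sends a HELLO directly to one of the peer's memoized stable endpoints, starting from a pseudo-random offset, so designated anchors and replicators stay reachable over both stacks. The sketch below restates that fallback in isolation; it is not ZeroTier code: Endpoint, sendHello() and std::rand() stand in for InetAddress, Peer::sendHELLO() and the node's PRNG, and stopping after the first HELLO per family is an assumption of the sketch.

// Standalone sketch of the per-family HELLO fallback, with simplified stand-in types.
#include <cstdlib>
#include <vector>

struct Endpoint { int family; }; // stands in for InetAddress (AF_INET / AF_INET6)

static void sendHello(const Endpoint &ep) { (void)ep; } // stands in for Peer::sendHELLO()

// 'sent' is the bitmask returned by the keepalive: 0x1 = IPv4 went out, 0x2 = IPv6.
// Returns true if at least one packet went out to this peer during this pass.
static bool contactMissingFamilies(unsigned int sent,const std::vector<Endpoint> &stable)
{
	bool contacted = (sent != 0);
	const int families[2] = { 4, 6 };
	const unsigned int bits[2] = { 0x1, 0x2 };
	for(int f=0;f<2;++f) {
		if ((sent & bits[f]) != 0)
			continue; // the keepalive already covered this family
		unsigned long ptr = (unsigned long)std::rand(); // the real code uses RR->node->prng()
		for(unsigned long k=0;k<(unsigned long)stable.size();++k) {
			const Endpoint &ep = stable[ptr++ % stable.size()];
			if (ep.family == families[f]) {
				sendHello(ep); // direct HELLO to a memoized stable endpoint of that family
				contacted = true;
				break; // assumption: one direct HELLO per family is enough
			}
		}
	}
	return contacted;
}

int main()
{
	Endpoint v4; v4.family = 4;
	Endpoint v6; v6.family = 6;
	std::vector<Endpoint> stable;
	stable.push_back(v4);
	stable.push_back(v6);
	// Pretend the keepalive only reached IPv4 (bit 0x1): the helper sends one IPv6 HELLO.
	return contactMissingFamilies(0x1,stable) ? 0 : 1;
}

If neither family can be contacted directly, the functor in the diff falls back to sending the HELLO through the best current upstream so the peer can be introduced.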
+ 9 - 18
node/RuntimeEnvironment.hpp

@@ -52,16 +52,14 @@ class RuntimeEnvironment
 public:
 	RuntimeEnvironment(Node *n) :
 		node(n)
-		,identity()
 		,localNetworkController((NetworkController *)0)
 		,sw((Switch *)0)
 		,mc((Multicaster *)0)
 		,topology((Topology *)0)
 		,sa((SelfAwareness *)0)
 	{
-		Utils::getSecureRandom(&instanceId,sizeof(instanceId));
-		memset(publicIdentityStr,0,sizeof(publicIdentityStr));
-		memset(secretIdentityStr,0,sizeof(secretIdentityStr));
+		publicIdentityStr[0] = (char)0;
+		secretIdentityStr[0] = (char)0;
 	}
 
 	~RuntimeEnvironment()
@@ -69,35 +67,28 @@ public:
 		Utils::burn(secretIdentityStr,sizeof(secretIdentityStr));
 	}
 
-	/**
-	 * A random integer identifying this running instance in a cluster
-	 */
-	uint64_t instanceId;
-
 	// Node instance that owns this RuntimeEnvironment
 	Node *const node;
 
-	// This node's identity
-	Identity identity;
-	char publicIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
-	char secretIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
-
 	// This is set externally to an instance of this base class
 	NetworkController *localNetworkController;
 
-	/*
-	 * Order matters a bit here. These are constructed in this order
+	/* Order matters a bit here. These are constructed in this order
 	 * and then deleted in the opposite order on Node exit. The order ensures
 	 * that things that are needed are there before they're needed.
 	 *
-	 * These are constant and never null after startup unless indicated.
-	 */
+	 * These are constant and never null after startup unless indicated. */
 
 	Trace *t;
 	Switch *sw;
 	Multicaster *mc;
 	Topology *topology;
 	SelfAwareness *sa;
+
+	// This node's identity and string representations thereof
+	Identity identity;
+	char publicIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
+	char secretIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
 };
 
 } // namespace ZeroTier

+ 2 - 2
node/Switch.cpp

@@ -91,7 +91,7 @@ void Switch::onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddre
 				const Address destination(fragment.destination());
 
 				if (destination != RR->identity.address()) {
-					if ( (!RR->topology->amRoot()) && (!path->trustEstablished(now)) )
+					if ( (!RR->topology->amUpstream()) && (!path->trustEstablished(now)) )
 						return;
 
 					if (fragment.hops() < ZT_RELAY_MAX_HOPS) {
@@ -162,7 +162,7 @@ void Switch::onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddre
 					return;
 
 				if (destination != RR->identity.address()) {
-					if ( (!RR->topology->amRoot()) && (!path->trustEstablished(now)) && (source != RR->identity.address()) )
+					if ( (!RR->topology->amUpstream()) && (!path->trustEstablished(now)) && (source != RR->identity.address()) )
 						return;
 
 					Packet packet(data,len);

+ 4 - 4
node/Topology.cpp

@@ -66,7 +66,7 @@ static const unsigned char ZT_DEFAULT_WORLD[ZT_DEFAULT_WORLD_LENGTH] = {0x01,0x0
 Topology::Topology(const RuntimeEnvironment *renv,void *tPtr) :
 	RR(renv),
 	_numConfiguredPhysicalPaths(0),
-	_amRoot(false)
+	_amUpstream(false)
 {
 	uint8_t tmp[ZT_WORLD_MAX_SERIALIZED_LENGTH];
 	uint64_t idtmp[2];
@@ -398,11 +398,11 @@ void Topology::_memoizeUpstreams(void *tPtr)
 {
 	// assumes _upstreams_m and _peers_m are locked
 	_upstreamAddresses.clear();
-	_amRoot = false;
+	_amUpstream = false;
 
 	for(std::vector<World::Root>::const_iterator i(_planet.roots().begin());i!=_planet.roots().end();++i) {
 		if (i->identity == RR->identity) {
-			_amRoot = true;
+			_amUpstream = true;
 		} else if (std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),i->identity.address()) == _upstreamAddresses.end()) {
 			_upstreamAddresses.push_back(i->identity.address());
 			SharedPtr<Peer> &hp = _peers[i->identity.address()];
@@ -414,7 +414,7 @@ void Topology::_memoizeUpstreams(void *tPtr)
 	for(std::vector<World>::const_iterator m(_moons.begin());m!=_moons.end();++m) {
 		for(std::vector<World::Root>::const_iterator i(m->roots().begin());i!=m->roots().end();++i) {
 			if (i->identity == RR->identity) {
-				_amRoot = true;
+				_amUpstream = true;
 			} else if (std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),i->identity.address()) == _upstreamAddresses.end()) {
 				_upstreamAddresses.push_back(i->identity.address());
 				SharedPtr<Peer> &hp = _peers[i->identity.address()];

+ 2 - 2
node/Topology.hpp

@@ -336,7 +336,7 @@ public:
 	/**
 	 * @return True if I am a root server in a planet or moon
 	 */
-	inline bool amRoot() const { return _amRoot; }
+	inline bool amUpstream() const { return _amUpstream; }
 
 	/**
 	 * Get info about a path
@@ -460,7 +460,7 @@ private:
 	std::vector<World> _moons;
 	std::vector< std::pair<uint64_t,Address> > _moonSeeds;
 	std::vector<Address> _upstreamAddresses;
-	bool _amRoot;
+	bool _amUpstream;
 	Mutex _upstreams_m; // locks worlds, upstream info, moon info, etc.
 };