瀏覽代碼

Some cleanup, and use getPeerNoCache() exclusively in Cluster.

Adam Ierymenko 9 年之前
父節點
當前提交
51fcc75354
共有 3 個文件被更改,包括 40 次插入和 13 次刪除
  1. +30 −12
      node/Cluster.cpp
  2. +2 −1
      node/Cluster.hpp
  3. +8 −0
      node/Path.hpp

+ 30 - 12
node/Cluster.cpp

@@ -83,7 +83,8 @@ Cluster::Cluster(
 	_id(id),
 	_id(id),
 	_zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
 	_zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
 	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
 	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
-	_peerAffinities(65536)
+	_peerAffinities(65536),
+	_lastCleanedPeerAffinities(0)
 {
 {
 	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
 	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
 
 
@@ -247,11 +248,13 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
 						}	break;
 						}	break;
 
 
 						case STATE_MESSAGE_COM: {
 						case STATE_MESSAGE_COM: {
+							/* not currently used so not decoded yet
 							CertificateOfMembership com;
 							CertificateOfMembership com;
 							ptr += com.deserialize(dmsg,ptr);
 							ptr += com.deserialize(dmsg,ptr);
 							if (com) {
 							if (com) {
 								TRACE("[%u] COM for %s on %.16llu rev %llu",(unsigned int)fromMemberId,com.issuedTo().toString().c_str(),com.networkId(),com.revision());
 								TRACE("[%u] COM for %s on %.16llu rev %llu",(unsigned int)fromMemberId,com.issuedTo().toString().c_str(),com.networkId(),com.revision());
 							}
 							}
+							*/
 						}	break;
 						}	break;
 
 
 						case STATE_MESSAGE_PROXY_UNITE: {
 						case STATE_MESSAGE_PROXY_UNITE: {
@@ -262,12 +265,13 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
 							for(unsigned int i=0;i<numRemotePeerPaths;++i)
 							for(unsigned int i=0;i<numRemotePeerPaths;++i)
 								ptr += remotePeerPaths[i].deserialize(dmsg,ptr);
 								ptr += remotePeerPaths[i].deserialize(dmsg,ptr);
 
 
-							TRACE("[%u] requested proxy unite between local peer %s and remote peer %s",(unsigned int)fromMemberId,localPeerAddress.toString().c_str(),remotePeerAddress.toString().c_str());
+							TRACE("[%u] requested that we unite local %s with remote %s",(unsigned int)fromMemberId,localPeerAddress.toString().c_str(),remotePeerAddress.toString().c_str());
 
 
-							SharedPtr<Peer> localPeer(RR->topology->getPeer(localPeerAddress));
+							const uint64_t now = RR->node->now();
+							SharedPtr<Peer> localPeer(RR->topology->getPeerNoCache(localPeerAddress,now));
 							if ((localPeer)&&(numRemotePeerPaths > 0)) {
 							if ((localPeer)&&(numRemotePeerPaths > 0)) {
 								InetAddress bestLocalV4,bestLocalV6;
 								InetAddress bestLocalV4,bestLocalV6;
-								localPeer->getBestActiveAddresses(RR->node->now(),bestLocalV4,bestLocalV6);
+								localPeer->getBestActiveAddresses(now,bestLocalV4,bestLocalV6);
 
 
 								InetAddress bestRemoteV4,bestRemoteV6;
 								InetAddress bestRemoteV4,bestRemoteV6;
 								for(unsigned int i=0;i<numRemotePeerPaths;++i) {
 								for(unsigned int i=0;i<numRemotePeerPaths;++i) {
@@ -369,11 +373,11 @@ bool Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPee
 		else return false;
 		else return false;
 	}
 	}
 
 
-	Buffer<2048> buf;
+	Buffer<1024> buf;
 	if (unite) {
 	if (unite) {
 		InetAddress v4,v6;
 		InetAddress v4,v6;
 		if (fromPeerAddress) {
 		if (fromPeerAddress) {
-			SharedPtr<Peer> fromPeer(RR->topology->getPeer(fromPeerAddress));
+			SharedPtr<Peer> fromPeer(RR->topology->getPeerNoCache(fromPeerAddress,now));
 			if (fromPeer)
 			if (fromPeer)
 				fromPeer->getBestActiveAddresses(now,v4,v6);
 				fromPeer->getBestActiveAddresses(now,v4,v6);
 		}
 		}
@@ -408,7 +412,7 @@ bool Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPee
 void Cluster::replicateHavePeer(const Identity &peerId,const InetAddress &physicalAddress)
 void Cluster::replicateHavePeer(const Identity &peerId,const InetAddress &physicalAddress)
 {
 {
 	const uint64_t now = RR->node->now();
 	const uint64_t now = RR->node->now();
-	{	// Use peer affinity table to track our own last announce time for peers
+	{
 		Mutex::Lock _l2(_peerAffinities_m);
 		Mutex::Lock _l2(_peerAffinities_m);
 		_PA &pa = _peerAffinities[peerId.address()];
 		_PA &pa = _peerAffinities[peerId.address()];
 		if (pa.mid != _id) {
 		if (pa.mid != _id) {
@@ -436,7 +440,7 @@ void Cluster::replicateHavePeer(const Identity &peerId,const InetAddress &physic
 
 
 void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group)
 void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group)
 {
 {
-	Buffer<2048> buf;
+	Buffer<1024> buf;
 	buf.append((uint64_t)nwid);
 	buf.append((uint64_t)nwid);
 	peerAddress.appendTo(buf);
 	peerAddress.appendTo(buf);
 	group.mac().appendTo(buf);
 	group.mac().appendTo(buf);
@@ -453,7 +457,7 @@ void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,co
 
 
 void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembership &com)
 void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembership &com)
 {
 {
-	Buffer<2048> buf;
+	Buffer<4096> buf;
 	com.serialize(buf);
 	com.serialize(buf);
 	TRACE("replicating %s COM for %.16llx to all members",com.issuedTo().toString().c_str(),com.networkId());
 	TRACE("replicating %s COM for %.16llx to all members",com.issuedTo().toString().c_str(),com.networkId());
 	{
 	{
@@ -502,6 +506,20 @@ void Cluster::doPeriodicTasks()
 			_flush(*mid); // does nothing if nothing to flush
 			_flush(*mid); // does nothing if nothing to flush
 		}
 		}
 	}
 	}
+
+	{
+		if ((now - _lastCleanedPeerAffinities) >= (ZT_PEER_ACTIVITY_TIMEOUT * 10)) {
+			_lastCleanedPeerAffinities = now;
+			Address *k = (Address *)0;
+			_PA *v = (_PA *)0;
+			Mutex::Lock _l(_peerAffinities_m);
+			Hashtable< Address,_PA >::Iterator i(_peerAffinities);
+			while (i.next(k,v)) {
+				if ((now - v->ts) >= (ZT_PEER_ACTIVITY_TIMEOUT * 10))
+					_peerAffinities.erase(*k);
+			}
+		}
+	}
 }
 }
 
 
 void Cluster::addMember(uint16_t memberId)
 void Cluster::addMember(uint16_t memberId)
@@ -563,7 +581,7 @@ bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddr
 
 
 		// Find member closest to this peer
 		// Find member closest to this peer
 		const uint64_t now = RR->node->now();
 		const uint64_t now = RR->node->now();
-		std::vector<InetAddress> best; // initial "best" is for peer to stay put
+		std::vector<InetAddress> best;
 		const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
 		const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
 		double bestDistance = (offload ? 2147483648.0 : currentDistance);
 		double bestDistance = (offload ? 2147483648.0 : currentDistance);
 		unsigned int bestMember = _id;
 		unsigned int bestMember = _id;
@@ -575,7 +593,7 @@ bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddr
 
 
 				// Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
 				// Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
 				if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
 				if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
-					double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
+					const double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
 					if (mdist < bestDistance) {
 					if (mdist < bestDistance) {
 						bestDistance = mdist;
 						bestDistance = mdist;
 						bestMember = *mid;
 						bestMember = *mid;
@@ -585,7 +603,7 @@ bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddr
 			}
 			}
 		}
 		}
 
 
-		// Suggestion redirection if a closer member was found
+		// Redirect to a closer member if it has a ZeroTier endpoint address in the same ss_family
 		for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
 		for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
 			if (a->ss_family == peerPhysicalAddress.ss_family) {
 			if (a->ss_family == peerPhysicalAddress.ss_family) {
 				TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());
 				TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());

+ 2 - 1
node/Cluster.hpp

@@ -47,7 +47,7 @@
 /**
 /**
  * Timeout for cluster members being considered "alive"
  * Timeout for cluster members being considered "alive"
  */
  */
-#define ZT_CLUSTER_TIMEOUT 10000
+#define ZT_CLUSTER_TIMEOUT 20000
 
 
 /**
 /**
  * How often should we announce that we have a peer?
  * How often should we announce that we have a peer?
@@ -349,6 +349,7 @@ private:
 	};
 	};
 	Hashtable< Address,_PA > _peerAffinities;
 	Hashtable< Address,_PA > _peerAffinities;
 	Mutex _peerAffinities_m;
 	Mutex _peerAffinities_m;
+	uint64_t _lastCleanedPeerAffinities;
 };
 };
 
 
 } // namespace ZeroTier
 } // namespace ZeroTier

+ 8 - 0
node/Path.hpp

@@ -95,6 +95,14 @@ public:
 	{
 	{
 	}
 	}
 
 
+	inline Path &operator=(const Path &p)
+		throw()
+	{
+		if (this != &p)
+			memcpy(this,&p,sizeof(Path));
+		return *this;
+	}
+
 	/**
 	/**
 	 * Called when a packet is sent to this remote path
 	 * Called when a packet is sent to this remote path
 	 *
 	 *