/*
 * Copyright (c)2019 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2023-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#ifndef ZT_PATH_HPP
#define ZT_PATH_HPP
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>

#include <map>
#include <stdexcept>
#include <algorithm>

#include "Constants.hpp"
#include "InetAddress.hpp"
#include "SharedPtr.hpp"
#include "AtomicCounter.hpp"
#include "Utils.hpp"
#include "RingBuffer.hpp"
#include "Packet.hpp"
/**
 * Maximum return value of preferenceRank()
 */
#define ZT_PATH_MAX_PREFERENCE_RANK ((ZT_INETADDRESS_MAX_SCOPE << 1) | 1)

namespace ZeroTier {

class RuntimeEnvironment;
  33. /**
  34. * A path across the physical network
  35. */
  36. class Path
  37. {
  38. friend class SharedPtr<Path>;
  39. public:
  40. /**
  41. * Efficient unique key for paths in a Hashtable
  42. */
  43. class HashKey
  44. {
  45. public:
  46. ZT_ALWAYS_INLINE HashKey() {}
  47. ZT_ALWAYS_INLINE HashKey(const int64_t l,const InetAddress &r)
  48. {
  49. if (r.ss_family == AF_INET) {
  50. _k[0] = (uint64_t)reinterpret_cast<const struct sockaddr_in *>(&r)->sin_addr.s_addr;
  51. _k[1] = (uint64_t)reinterpret_cast<const struct sockaddr_in *>(&r)->sin_port;
  52. _k[2] = (uint64_t)l;
  53. } else if (r.ss_family == AF_INET6) {
  54. memcpy(_k,reinterpret_cast<const struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,16);
  55. _k[2] = ((uint64_t)reinterpret_cast<const struct sockaddr_in6 *>(&r)->sin6_port << 32) ^ (uint64_t)l;
  56. } else {
  57. memcpy(_k,&r,std::min(sizeof(_k),sizeof(InetAddress)));
  58. _k[2] += (uint64_t)l;
  59. }
  60. }
  61. ZT_ALWAYS_INLINE unsigned long hashCode() const { return (unsigned long)(_k[0] + _k[1] + _k[2]); }
  62. ZT_ALWAYS_INLINE bool operator==(const HashKey &k) const { return ( (_k[0] == k._k[0]) && (_k[1] == k._k[1]) && (_k[2] == k._k[2]) ); }
  63. ZT_ALWAYS_INLINE bool operator!=(const HashKey &k) const { return (!(*this == k)); }
  64. private:
  65. uint64_t _k[3];
  66. };
  67. ZT_ALWAYS_INLINE Path() :
  68. _lastOut(0),
  69. _lastIn(0),
  70. _lastPathQualityComputeTime(0),
  71. _localSocket(-1),
  72. _latency(0xffff),
  73. _addr(),
  74. _ipScope(InetAddress::IP_SCOPE_NONE),
  75. _lastAck(0),
  76. _lastThroughputEstimation(0),
  77. _lastQoSMeasurement(0),
  78. _lastQoSRecordPurge(0),
  79. _unackedBytes(0),
  80. _expectingAckAsOf(0),
  81. _packetsReceivedSinceLastAck(0),
  82. _packetsReceivedSinceLastQoS(0),
  83. _maxLifetimeThroughput(0),
  84. _lastComputedMeanThroughput(0),
  85. _bytesAckedSinceLastThroughputEstimation(0),
  86. _lastComputedMeanLatency(0.0),
  87. _lastComputedPacketDelayVariance(0.0),
  88. _lastComputedPacketErrorRatio(0.0),
  89. _lastComputedPacketLossRatio(0),
  90. _lastComputedStability(0.0),
  91. _lastComputedRelativeQuality(0),
  92. _lastComputedThroughputDistCoeff(0.0),
  93. _lastAllocation(0)
  94. {
  95. memset(_ifname, 0, 16);
  96. memset(_addrString, 0, sizeof(_addrString));
  97. }
  98. ZT_ALWAYS_INLINE Path(const int64_t localSocket,const InetAddress &addr) :
  99. _lastOut(0),
  100. _lastIn(0),
  101. _lastPathQualityComputeTime(0),
  102. _localSocket(localSocket),
  103. _latency(0xffff),
  104. _addr(addr),
  105. _ipScope(addr.ipScope()),
  106. _lastAck(0),
  107. _lastThroughputEstimation(0),
  108. _lastQoSMeasurement(0),
  109. _lastQoSRecordPurge(0),
  110. _unackedBytes(0),
  111. _expectingAckAsOf(0),
  112. _packetsReceivedSinceLastAck(0),
  113. _packetsReceivedSinceLastQoS(0),
  114. _maxLifetimeThroughput(0),
  115. _lastComputedMeanThroughput(0),
  116. _bytesAckedSinceLastThroughputEstimation(0),
  117. _lastComputedMeanLatency(0.0),
  118. _lastComputedPacketDelayVariance(0.0),
  119. _lastComputedPacketErrorRatio(0.0),
  120. _lastComputedPacketLossRatio(0),
  121. _lastComputedStability(0.0),
  122. _lastComputedRelativeQuality(0),
  123. _lastComputedThroughputDistCoeff(0.0),
  124. _lastAllocation(0)
  125. {
  126. memset(_ifname, 0, 16);
  127. memset(_addrString, 0, sizeof(_addrString));
  128. if (_localSocket != -1) {
  129. // TODO: add localInterface alongside localSocket
  130. //_phy->getIfName((PhySocket *) ((uintptr_t) _localSocket), _ifname, 16);
  131. }
  132. }
  133. /**
  134. * Called when a packet is received from this remote path, regardless of content
  135. *
  136. * @param t Time of receive
  137. */
  138. ZT_ALWAYS_INLINE void received(const uint64_t t) { _lastIn = t; }
  139. /**
  140. * Send a packet via this path (last out time is also updated)
  141. *
  142. * @param RR Runtime environment
  143. * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
  144. * @param data Packet data
  145. * @param len Packet length
  146. * @param now Current time
  147. * @return True if transport reported success
  148. */
  149. bool send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now);
  150. /**
  151. * Manually update last sent time
  152. *
  153. * @param t Time of send
  154. */
  155. ZT_ALWAYS_INLINE void sent(const int64_t t) { _lastOut = t; }
  156. /**
  157. * Update path latency with a new measurement
  158. *
  159. * @param l Measured latency
  160. */
  161. ZT_ALWAYS_INLINE void updateLatency(const unsigned int l, int64_t now)
  162. {
  163. unsigned int pl = _latency;
  164. if (pl < 0xffff) {
  165. _latency = (pl + l) / 2;
  166. }
  167. else {
  168. _latency = l;
  169. }
  170. _latencySamples.push(l);
  171. }
  172. /**
  173. * @return Local socket as specified by external code
  174. */
  175. ZT_ALWAYS_INLINE int64_t localSocket() const { return _localSocket; }
  176. /**
  177. * @return Physical address
  178. */
  179. ZT_ALWAYS_INLINE const InetAddress &address() const { return _addr; }
  180. /**
  181. * @return IP scope -- faster shortcut for address().ipScope()
  182. */
  183. ZT_ALWAYS_INLINE InetAddress::IpScope ipScope() const { return _ipScope; }
  184. /**
  185. * @return Preference rank, higher == better
  186. */
  187. ZT_ALWAYS_INLINE unsigned int preferenceRank() const
  188. {
  189. // This causes us to rank paths in order of IP scope rank (see InetAdddress.hpp) but
  190. // within each IP scope class to prefer IPv6 over IPv4.
  191. return ( ((unsigned int)_ipScope << 1) | (unsigned int)(_addr.ss_family == AF_INET6) );
  192. }
  193. /**
  194. * Check whether this address is valid for a ZeroTier path
  195. *
  196. * This checks the address type and scope against address types and scopes
  197. * that we currently support for ZeroTier communication.
  198. *
  199. * @param a Address to check
  200. * @return True if address is good for ZeroTier path use
  201. */
  202. static ZT_ALWAYS_INLINE bool isAddressValidForPath(const InetAddress &a)
  203. {
  204. if ((a.ss_family == AF_INET)||(a.ss_family == AF_INET6)) {
  205. switch(a.ipScope()) {
  206. /* Note: we don't do link-local at the moment. Unfortunately these
  207. * cause several issues. The first is that they usually require a
  208. * device qualifier, which we don't handle yet and can't portably
  209. * push in PUSH_DIRECT_PATHS. The second is that some OSes assign
  210. * these very ephemerally or otherwise strangely. So we'll use
  211. * private, pseudo-private, shared (e.g. carrier grade NAT), or
  212. * global IP addresses. */
  213. case InetAddress::IP_SCOPE_PRIVATE:
  214. case InetAddress::IP_SCOPE_PSEUDOPRIVATE:
  215. case InetAddress::IP_SCOPE_SHARED:
  216. case InetAddress::IP_SCOPE_GLOBAL:
  217. if (a.ss_family == AF_INET6) {
  218. // TEMPORARY HACK: for now, we are going to blacklist he.net IPv6
  219. // tunnels due to very spotty performance and low MTU issues over
  220. // these IPv6 tunnel links.
  221. const uint8_t *ipd = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr);
  222. if ((ipd[0] == 0x20)&&(ipd[1] == 0x01)&&(ipd[2] == 0x04)&&(ipd[3] == 0x70))
  223. return false;
  224. }
  225. return true;
  226. default:
  227. return false;
  228. }
  229. }
  230. return false;
  231. }
  232. /**
  233. * @return Latency or 0xffff if unknown
  234. */
  235. ZT_ALWAYS_INLINE unsigned int latency() const { return _latency; }
  236. /**
  237. * @return Path quality -- lower is better
  238. */
  239. ZT_ALWAYS_INLINE long quality(const int64_t now) const
  240. {
  241. const long l = (long)_latency;
  242. const long age = (long)std::min((long)(now - _lastIn),(long)(ZT_PEER_PING_PERIOD * 10)); // set an upper sanity limit to avoid overflow
  243. return ( ( (age < (ZT_PEER_PING_PERIOD + 5000)) ? l : (l + 65535 + age) ) * (long)((ZT_INETADDRESS_MAX_SCOPE - _ipScope) + 1));
  244. }
  245. /**
  246. * Record statistics on outgoing packets. Used later to estimate QoS metrics.
  247. *
  248. * @param now Current time
  249. * @param packetId ID of packet
  250. * @param payloadLength Length of payload
  251. * @param verb Packet verb
  252. */
  253. ZT_ALWAYS_INLINE void recordOutgoingPacket(int64_t now, int64_t packetId, uint16_t payloadLength, Packet::Verb verb)
  254. {
  255. Mutex::Lock _l(_statistics_m);
  256. if (verb != Packet::VERB_ACK && verb != Packet::VERB_QOS_MEASUREMENT) {
  257. if ((packetId & (ZT_PATH_QOS_ACK_PROTOCOL_DIVISOR - 1)) == 0) {
  258. _unackedBytes += payloadLength;
  259. // Take note that we're expecting a VERB_ACK on this path as of a specific time
  260. _expectingAckAsOf = ackAge(now) > ZT_PATH_ACK_INTERVAL ? _expectingAckAsOf : now;
  261. if (_outQoSRecords.size() < ZT_PATH_MAX_OUTSTANDING_QOS_RECORDS) {
  262. _outQoSRecords[packetId] = now;
  263. }
  264. }
  265. }
  266. }
  267. /**
  268. * Record statistics on incoming packets. Used later to estimate QoS metrics.
  269. *
  270. * @param now Current time
  271. * @param packetId ID of packet
  272. * @param payloadLength Length of payload
  273. * @param verb Packet verb
  274. */
  275. ZT_ALWAYS_INLINE void recordIncomingPacket(int64_t now, int64_t packetId, uint16_t payloadLength, Packet::Verb verb)
  276. {
  277. Mutex::Lock _l(_statistics_m);
  278. if (verb != Packet::VERB_ACK && verb != Packet::VERB_QOS_MEASUREMENT) {
  279. if ((packetId & (ZT_PATH_QOS_ACK_PROTOCOL_DIVISOR - 1)) == 0) {
  280. _inACKRecords[packetId] = payloadLength;
  281. _packetsReceivedSinceLastAck++;
  282. _inQoSRecords[packetId] = now;
  283. _packetsReceivedSinceLastQoS++;
  284. }
  285. _packetValiditySamples.push(true);
  286. }
  287. }
  288. /**
  289. * Record that we've received a VERB_ACK on this path, also compute throughput if required.
  290. *
  291. * @param now Current time
  292. * @param ackedBytes Number of bytes acknowledged by other peer
  293. */
  294. ZT_ALWAYS_INLINE void receivedAck(int64_t now, int32_t ackedBytes)
  295. {
  296. _expectingAckAsOf = 0;
  297. _unackedBytes = (ackedBytes > _unackedBytes) ? 0 : _unackedBytes - ackedBytes;
  298. int64_t timeSinceThroughputEstimate = (now - _lastThroughputEstimation);
  299. if (timeSinceThroughputEstimate >= ZT_PATH_THROUGHPUT_MEASUREMENT_INTERVAL) {
  300. uint64_t throughput = (uint64_t)((float)(_bytesAckedSinceLastThroughputEstimation * 8) / ((float)timeSinceThroughputEstimate / (float)1000));
  301. _throughputSamples.push(throughput);
  302. _maxLifetimeThroughput = throughput > _maxLifetimeThroughput ? throughput : _maxLifetimeThroughput;
  303. _lastThroughputEstimation = now;
  304. _bytesAckedSinceLastThroughputEstimation = 0;
  305. } else {
  306. _bytesAckedSinceLastThroughputEstimation += ackedBytes;
  307. }
  308. }
  309. /**
  310. * @return Number of bytes this peer is responsible for ACKing since last ACK
  311. */
  312. inline int32_t bytesToAck()
  313. {
  314. Mutex::Lock _l(_statistics_m);
  315. int32_t bytesToAck = 0;
  316. std::map<uint64_t,uint16_t>::iterator it = _inACKRecords.begin();
  317. while (it != _inACKRecords.end()) {
  318. bytesToAck += it->second;
  319. it++;
  320. }
  321. return bytesToAck;
  322. }
  323. /**
  324. * @return Number of bytes thus far sent that have not been acknowledged by the remote peer
  325. */
  326. inline int64_t unackedSentBytes()
  327. {
  328. return _unackedBytes;
  329. }
  330. /**
  331. * Account for the fact that an ACK was just sent. Reset counters, timers, and clear statistics buffers
  332. *
  333. * @param Current time
  334. */
  335. inline void sentAck(int64_t now)
  336. {
  337. Mutex::Lock _l(_statistics_m);
  338. _inACKRecords.clear();
  339. _packetsReceivedSinceLastAck = 0;
  340. _lastAck = now;
  341. }
  342. /**
  343. * Receive QoS data, match with recorded egress times from this peer, compute latency
  344. * estimates.
  345. *
  346. * @param now Current time
  347. * @param count Number of records
  348. * @param rx_id table of packet IDs
  349. * @param rx_ts table of holding times
  350. */
  351. inline void receivedQoS(int64_t now, int count, uint64_t *rx_id, uint16_t *rx_ts)
  352. {
  353. Mutex::Lock _l(_statistics_m);
  354. // Look up egress times and compute latency values for each record
  355. std::map<uint64_t,uint64_t>::iterator it;
  356. for (int j=0; j<count; j++) {
  357. it = _outQoSRecords.find(rx_id[j]);
  358. if (it != _outQoSRecords.end()) {
  359. uint16_t rtt = (uint16_t)(now - it->second);
  360. uint16_t rtt_compensated = rtt - rx_ts[j];
  361. uint16_t latency = rtt_compensated / 2;
  362. updateLatency(latency, now);
  363. _outQoSRecords.erase(it);
  364. }
  365. }
  366. }
  367. /**
  368. * Generate the contents of a VERB_QOS_MEASUREMENT packet.
  369. *
  370. * @param now Current time
  371. * @param qosBuffer destination buffer
  372. * @return Size of payload
  373. */
  374. inline int32_t generateQoSPacket(int64_t now, char *qosBuffer)
  375. {
  376. Mutex::Lock _l(_statistics_m);
  377. int32_t len = 0;
  378. std::map<uint64_t,uint64_t>::iterator it = _inQoSRecords.begin();
  379. int i=0;
  380. while (i<_packetsReceivedSinceLastQoS && it != _inQoSRecords.end()) {
  381. uint64_t id = it->first;
  382. memcpy(qosBuffer, &id, sizeof(uint64_t));
  383. qosBuffer+=sizeof(uint64_t);
  384. uint16_t holdingTime = (uint16_t)(now - it->second);
  385. memcpy(qosBuffer, &holdingTime, sizeof(uint16_t));
  386. qosBuffer+=sizeof(uint16_t);
  387. len+=sizeof(uint64_t)+sizeof(uint16_t);
  388. _inQoSRecords.erase(it++);
  389. i++;
  390. }
  391. return len;
  392. }
  393. /**
  394. * Account for the fact that a VERB_QOS_MEASUREMENT was just sent. Reset timers.
  395. *
  396. * @param Current time
  397. */
  398. inline void sentQoS(int64_t now) {
  399. _packetsReceivedSinceLastQoS = 0;
  400. _lastQoSMeasurement = now;
  401. }
  402. /**
  403. * @param now Current time
  404. * @return Whether an ACK (VERB_ACK) packet needs to be emitted at this time
  405. */
  406. inline bool needsToSendAck(int64_t now) {
  407. return ((now - _lastAck) >= ZT_PATH_ACK_INTERVAL ||
  408. (_packetsReceivedSinceLastAck == ZT_PATH_QOS_TABLE_SIZE)) && _packetsReceivedSinceLastAck;
  409. }
  410. /**
  411. * @param now Current time
  412. * @return Whether a QoS (VERB_QOS_MEASUREMENT) packet needs to be emitted at this time
  413. */
  414. inline bool needsToSendQoS(int64_t now) {
  415. return ((_packetsReceivedSinceLastQoS >= ZT_PATH_QOS_TABLE_SIZE) ||
  416. ((now - _lastQoSMeasurement) > ZT_PATH_QOS_INTERVAL)) && _packetsReceivedSinceLastQoS;
  417. }
  418. /**
  419. * How much time has elapsed since we've been expecting a VERB_ACK on this path. This value
  420. * is used to determine a more relevant path "age". This lets us penalize paths which are no
  421. * longer ACKing, but not those that simple aren't being used to carry traffic at the
  422. * current time.
  423. */
  424. inline int64_t ackAge(int64_t now) { return _expectingAckAsOf ? now - _expectingAckAsOf : 0; }
  425. /**
  426. * The maximum observed throughput (in bits/s) for this path
  427. */
  428. inline uint64_t maxLifetimeThroughput() { return _maxLifetimeThroughput; }
  429. /**
  430. * @return The mean throughput (in bits/s) of this link
  431. */
  432. inline uint64_t meanThroughput() { return _lastComputedMeanThroughput; }
  433. /**
  434. * Assign a new relative quality value for this path in the aggregate link
  435. *
  436. * @param rq Quality of this path in comparison to other paths available to this peer
  437. */
  438. inline void updateRelativeQuality(float rq) { _lastComputedRelativeQuality = rq; }
  439. /**
  440. * @return Quality of this path compared to others in the aggregate link
  441. */
  442. inline float relativeQuality() { return _lastComputedRelativeQuality; }
  443. /**
  444. * Assign a new allocation value for this path in the aggregate link
  445. *
  446. * @param allocation Percentage of traffic to be sent over this path to a peer
  447. */
  448. inline void updateComponentAllocationOfAggregateLink(unsigned char allocation) { _lastAllocation = allocation; }
  449. /**
  450. * @return Percentage of traffic allocated to this path in the aggregate link
  451. */
  452. inline unsigned char allocation() { return _lastAllocation; }
  453. /**
  454. * @return Stability estimates can become expensive to compute, we cache the most recent result.
  455. */
  456. inline float lastComputedStability() { return _lastComputedStability; }
  457. /**
  458. * @return A pointer to a cached copy of the human-readable name of the interface this Path's localSocket is bound to
  459. */
  460. inline char *getName() { return _ifname; }
  461. /**
  462. * @return Packet delay variance
  463. */
  464. inline float packetDelayVariance() { return _lastComputedPacketDelayVariance; }
  465. /**
  466. * @return Previously-computed mean latency
  467. */
  468. inline float meanLatency() { return _lastComputedMeanLatency; }
  469. /**
  470. * @return Packet loss rate (PLR)
  471. */
  472. inline float packetLossRatio() { return _lastComputedPacketLossRatio; }
  473. /**
  474. * @return Packet error ratio (PER)
  475. */
  476. inline float packetErrorRatio() { return _lastComputedPacketErrorRatio; }
  477. /**
  478. * Record an invalid incoming packet. This packet failed MAC/compression/cipher checks and will now
  479. * contribute to a Packet Error Ratio (PER).
  480. */
  481. inline void recordInvalidPacket() { _packetValiditySamples.push(false); }
  482. /**
  483. * @return A pointer to a cached copy of the address string for this Path (For debugging only)
  484. */
  485. inline char *getAddressString() { return _addrString; }
  486. /**
  487. * @return The current throughput disturbance coefficient
  488. */
  489. inline float throughputDisturbanceCoefficient() { return _lastComputedThroughputDistCoeff; }
  490. /**
  491. * Compute and cache stability and performance metrics. The resultant stability coefficient is a measure of how "well behaved"
  492. * this path is. This figure is substantially different from (but required for the estimation of the path's overall "quality".
  493. *
  494. * @param now Current time
  495. */
  496. inline void processBackgroundPathMeasurements(const int64_t now)
  497. {
  498. if (now - _lastPathQualityComputeTime > ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
  499. Mutex::Lock _l(_statistics_m);
  500. _lastPathQualityComputeTime = now;
  501. address().toString(_addrString);
  502. _lastComputedMeanLatency = _latencySamples.mean();
  503. _lastComputedPacketDelayVariance = _latencySamples.stddev(); // Similar to "jitter" (SEE: RFC 3393, RFC 4689)
  504. _lastComputedMeanThroughput = (uint64_t)_throughputSamples.mean();
  505. // If no packet validity samples, assume PER==0
  506. _lastComputedPacketErrorRatio = 1 - (_packetValiditySamples.count() ? _packetValiditySamples.mean() : 1);
  507. // Compute path stability
  508. // Normalize measurements with wildly different ranges into a reasonable range
  509. float normalized_pdv = Utils::normalize(_lastComputedPacketDelayVariance, 0, ZT_PATH_MAX_PDV, 0, 10);
  510. float normalized_la = Utils::normalize(_lastComputedMeanLatency, 0, ZT_PATH_MAX_MEAN_LATENCY, 0, 10);
  511. float throughput_cv = _throughputSamples.mean() > 0 ? _throughputSamples.stddev() / _throughputSamples.mean() : 1;
  512. // Form an exponential cutoff and apply contribution weights
  513. float pdv_contrib = expf((-1.0f)*normalized_pdv) * (float)ZT_PATH_CONTRIB_PDV;
  514. float latency_contrib = expf((-1.0f)*normalized_la) * (float)ZT_PATH_CONTRIB_LATENCY;
  515. // Throughput Disturbance Coefficient
  516. float throughput_disturbance_contrib = expf((-1.0f)*throughput_cv) * (float)ZT_PATH_CONTRIB_THROUGHPUT_DISTURBANCE;
  517. _throughputDisturbanceSamples.push(throughput_cv);
  518. _lastComputedThroughputDistCoeff = _throughputDisturbanceSamples.mean();
  519. // Obey user-defined ignored contributions
  520. pdv_contrib = ZT_PATH_CONTRIB_PDV > 0.0 ? pdv_contrib : 1;
  521. latency_contrib = ZT_PATH_CONTRIB_LATENCY > 0.0 ? latency_contrib : 1;
  522. throughput_disturbance_contrib = ZT_PATH_CONTRIB_THROUGHPUT_DISTURBANCE > 0.0 ? throughput_disturbance_contrib : 1;
  523. // Stability
  524. _lastComputedStability = pdv_contrib + latency_contrib + throughput_disturbance_contrib;
  525. _lastComputedStability *= 1 - _lastComputedPacketErrorRatio;
  526. // Prevent QoS records from sticking around for too long
  527. std::map<uint64_t,uint64_t>::iterator it = _outQoSRecords.begin();
  528. while (it != _outQoSRecords.end()) {
  529. // Time since egress of tracked packet
  530. if ((now - it->second) >= ZT_PATH_QOS_TIMEOUT) {
  531. _outQoSRecords.erase(it++);
  532. } else { it++; }
  533. }
  534. }
  535. }
  536. /**
  537. * @return True if this path is alive (receiving data)
  538. */
  539. ZT_ALWAYS_INLINE bool alive(const int64_t now) const { return ((now - _lastIn) < ((ZT_PEER_PING_PERIOD * 2) + 5000)); }
  540. /**
  541. * @return Last time we sent something
  542. */
  543. ZT_ALWAYS_INLINE int64_t lastOut() const { return _lastOut; }
  544. /**
  545. * @return Last time we received anything
  546. */
  547. ZT_ALWAYS_INLINE int64_t lastIn() const { return _lastIn; }
  548. private:
  549. Mutex _statistics_m;
  550. volatile int64_t _lastOut;
  551. volatile int64_t _lastIn;
  552. volatile int64_t _lastPathQualityComputeTime;
  553. int64_t _localSocket;
  554. volatile unsigned int _latency;
  555. InetAddress _addr;
  556. InetAddress::IpScope _ipScope; // memoize this since it's a computed value checked often
  557. AtomicCounter __refCount;
  558. std::map<uint64_t,uint64_t> _outQoSRecords; // id:egress_time
  559. std::map<uint64_t,uint64_t> _inQoSRecords; // id:now
  560. std::map<uint64_t,uint16_t> _inACKRecords; // id:len
  561. int64_t _lastAck;
  562. int64_t _lastThroughputEstimation;
  563. int64_t _lastQoSMeasurement;
  564. int64_t _lastQoSRecordPurge;
  565. int64_t _unackedBytes;
  566. int64_t _expectingAckAsOf;
  567. int16_t _packetsReceivedSinceLastAck;
  568. int16_t _packetsReceivedSinceLastQoS;
  569. uint64_t _maxLifetimeThroughput;
  570. uint64_t _lastComputedMeanThroughput;
  571. uint64_t _bytesAckedSinceLastThroughputEstimation;
  572. float _lastComputedMeanLatency;
  573. float _lastComputedPacketDelayVariance;
  574. float _lastComputedPacketErrorRatio;
  575. float _lastComputedPacketLossRatio;
  576. // cached estimates
  577. float _lastComputedStability;
  578. float _lastComputedRelativeQuality;
  579. float _lastComputedThroughputDistCoeff;
  580. unsigned char _lastAllocation;
  581. // cached human-readable strings for tracing purposes
  582. char _ifname[16];
  583. char _addrString[256];
  584. RingBuffer<uint64_t,ZT_PATH_QUALITY_METRIC_WIN_SZ> _throughputSamples;
  585. RingBuffer<uint32_t,ZT_PATH_QUALITY_METRIC_WIN_SZ> _latencySamples;
  586. RingBuffer<bool,ZT_PATH_QUALITY_METRIC_WIN_SZ> _packetValiditySamples;
  587. RingBuffer<float,ZT_PATH_QUALITY_METRIC_WIN_SZ> _throughputDisturbanceSamples;
  588. };
} // namespace ZeroTier

#endif