Path.hpp 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669
  1. /*
  2. * Copyright (c)2019 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2023-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #ifndef ZT_PATH_HPP
  14. #define ZT_PATH_HPP
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <algorithm>
#include <map>
#include <stdexcept>
#include "Constants.hpp"
#include "InetAddress.hpp"
#include "SharedPtr.hpp"
#include "AtomicCounter.hpp"
#include "Mutex.hpp"
#include "Utils.hpp"
#include "RingBuffer.hpp"
#include "Packet.hpp"
#include "../osdep/Phy.hpp"
  28. /**
  29. * Maximum return value of preferenceRank()
  30. */
  31. #define ZT_PATH_MAX_PREFERENCE_RANK ((ZT_INETADDRESS_MAX_SCOPE << 1) | 1)
  32. namespace ZeroTier {
  33. class RuntimeEnvironment;
  34. /**
  35. * A path across the physical network
  36. */
  37. class Path
  38. {
  39. friend class SharedPtr<Path>;
  40. Phy<Path *> *_phy;
  41. public:
  42. /**
  43. * Efficient unique key for paths in a Hashtable
  44. */
  45. class HashKey
  46. {
  47. public:
  48. ZT_ALWAYS_INLINE HashKey() {}
  49. ZT_ALWAYS_INLINE HashKey(const int64_t l,const InetAddress &r)
  50. {
  51. if (r.ss_family == AF_INET) {
  52. _k[0] = (uint64_t)reinterpret_cast<const struct sockaddr_in *>(&r)->sin_addr.s_addr;
  53. _k[1] = (uint64_t)reinterpret_cast<const struct sockaddr_in *>(&r)->sin_port;
  54. _k[2] = (uint64_t)l;
  55. } else if (r.ss_family == AF_INET6) {
  56. memcpy(_k,reinterpret_cast<const struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,16);
  57. _k[2] = ((uint64_t)reinterpret_cast<const struct sockaddr_in6 *>(&r)->sin6_port << 32) ^ (uint64_t)l;
  58. } else {
  59. memcpy(_k,&r,std::min(sizeof(_k),sizeof(InetAddress)));
  60. _k[2] += (uint64_t)l;
  61. }
  62. }
  63. ZT_ALWAYS_INLINE unsigned long hashCode() const { return (unsigned long)(_k[0] + _k[1] + _k[2]); }
  64. ZT_ALWAYS_INLINE bool operator==(const HashKey &k) const { return ( (_k[0] == k._k[0]) && (_k[1] == k._k[1]) && (_k[2] == k._k[2]) ); }
  65. ZT_ALWAYS_INLINE bool operator!=(const HashKey &k) const { return (!(*this == k)); }
  66. private:
  67. uint64_t _k[3];
  68. };
  69. inline Path() :
  70. _lastOut(0),
  71. _lastIn(0),
  72. _lastPathQualityComputeTime(0),
  73. _localSocket(-1),
  74. _latency(0xffff),
  75. _addr(),
  76. _ipScope(InetAddress::IP_SCOPE_NONE),
  77. _lastAck(0),
  78. _lastThroughputEstimation(0),
  79. _lastQoSMeasurement(0),
  80. _lastQoSRecordPurge(0),
  81. _unackedBytes(0),
  82. _expectingAckAsOf(0),
  83. _packetsReceivedSinceLastAck(0),
  84. _packetsReceivedSinceLastQoS(0),
  85. _maxLifetimeThroughput(0),
  86. _lastComputedMeanThroughput(0),
  87. _bytesAckedSinceLastThroughputEstimation(0),
  88. _lastComputedMeanLatency(0.0),
  89. _lastComputedPacketDelayVariance(0.0),
  90. _lastComputedPacketErrorRatio(0.0),
  91. _lastComputedPacketLossRatio(0),
  92. _lastComputedStability(0.0),
  93. _lastComputedRelativeQuality(0),
  94. _lastComputedThroughputDistCoeff(0.0),
  95. _lastAllocation(0)
  96. {
  97. memset(_ifname, 0, 16);
  98. memset(_addrString, 0, sizeof(_addrString));
  99. }
  100. inline Path(const int64_t localSocket,const InetAddress &addr) :
  101. _lastOut(0),
  102. _lastIn(0),
  103. _lastPathQualityComputeTime(0),
  104. _localSocket(localSocket),
  105. _latency(0xffff),
  106. _addr(addr),
  107. _ipScope(addr.ipScope()),
  108. _lastAck(0),
  109. _lastThroughputEstimation(0),
  110. _lastQoSMeasurement(0),
  111. _lastQoSRecordPurge(0),
  112. _unackedBytes(0),
  113. _expectingAckAsOf(0),
  114. _packetsReceivedSinceLastAck(0),
  115. _packetsReceivedSinceLastQoS(0),
  116. _maxLifetimeThroughput(0),
  117. _lastComputedMeanThroughput(0),
  118. _bytesAckedSinceLastThroughputEstimation(0),
  119. _lastComputedMeanLatency(0.0),
  120. _lastComputedPacketDelayVariance(0.0),
  121. _lastComputedPacketErrorRatio(0.0),
  122. _lastComputedPacketLossRatio(0),
  123. _lastComputedStability(0.0),
  124. _lastComputedRelativeQuality(0),
  125. _lastComputedThroughputDistCoeff(0.0),
  126. _lastAllocation(0)
  127. {
  128. memset(_ifname, 0, 16);
  129. memset(_addrString, 0, sizeof(_addrString));
  130. if (_localSocket != -1) {
  131. _phy->getIfName((PhySocket *) ((uintptr_t) _localSocket), _ifname, 16);
  132. }
  133. }
  134. /**
  135. * Called when a packet is received from this remote path, regardless of content
  136. *
  137. * @param t Time of receive
  138. */
  139. ZT_ALWAYS_INLINE void received(const uint64_t t) { _lastIn = t; }
  140. /**
  141. * Send a packet via this path (last out time is also updated)
  142. *
  143. * @param RR Runtime environment
  144. * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
  145. * @param data Packet data
  146. * @param len Packet length
  147. * @param now Current time
  148. * @return True if transport reported success
  149. */
  150. bool send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now);
  151. /**
  152. * Manually update last sent time
  153. *
  154. * @param t Time of send
  155. */
  156. ZT_ALWAYS_INLINE void sent(const int64_t t) { _lastOut = t; }
  157. /**
  158. * Update path latency with a new measurement
  159. *
  160. * @param l Measured latency
  161. */
  162. ZT_ALWAYS_INLINE void updateLatency(const unsigned int l, int64_t now)
  163. {
  164. unsigned int pl = _latency;
  165. if (pl < 0xffff) {
  166. _latency = (pl + l) / 2;
  167. }
  168. else {
  169. _latency = l;
  170. }
  171. _latencySamples.push(l);
  172. }
  173. /**
  174. * @return Local socket as specified by external code
  175. */
  176. ZT_ALWAYS_INLINE int64_t localSocket() const { return _localSocket; }
  177. /**
  178. * @return Physical address
  179. */
  180. ZT_ALWAYS_INLINE const InetAddress &address() const { return _addr; }
  181. /**
  182. * @return IP scope -- faster shortcut for address().ipScope()
  183. */
  184. ZT_ALWAYS_INLINE InetAddress::IpScope ipScope() const { return _ipScope; }
  185. /**
  186. * @return Preference rank, higher == better
  187. */
  188. ZT_ALWAYS_INLINE unsigned int preferenceRank() const
  189. {
  190. // This causes us to rank paths in order of IP scope rank (see InetAdddress.hpp) but
  191. // within each IP scope class to prefer IPv6 over IPv4.
  192. return ( ((unsigned int)_ipScope << 1) | (unsigned int)(_addr.ss_family == AF_INET6) );
  193. }
  194. /**
  195. * Check whether this address is valid for a ZeroTier path
  196. *
  197. * This checks the address type and scope against address types and scopes
  198. * that we currently support for ZeroTier communication.
  199. *
  200. * @param a Address to check
  201. * @return True if address is good for ZeroTier path use
  202. */
  203. static inline bool isAddressValidForPath(const InetAddress &a)
  204. {
  205. if ((a.ss_family == AF_INET)||(a.ss_family == AF_INET6)) {
  206. switch(a.ipScope()) {
  207. /* Note: we don't do link-local at the moment. Unfortunately these
  208. * cause several issues. The first is that they usually require a
  209. * device qualifier, which we don't handle yet and can't portably
  210. * push in PUSH_DIRECT_PATHS. The second is that some OSes assign
  211. * these very ephemerally or otherwise strangely. So we'll use
  212. * private, pseudo-private, shared (e.g. carrier grade NAT), or
  213. * global IP addresses. */
  214. case InetAddress::IP_SCOPE_PRIVATE:
  215. case InetAddress::IP_SCOPE_PSEUDOPRIVATE:
  216. case InetAddress::IP_SCOPE_SHARED:
  217. case InetAddress::IP_SCOPE_GLOBAL:
  218. if (a.ss_family == AF_INET6) {
  219. // TEMPORARY HACK: for now, we are going to blacklist he.net IPv6
  220. // tunnels due to very spotty performance and low MTU issues over
  221. // these IPv6 tunnel links.
  222. const uint8_t *ipd = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr);
  223. if ((ipd[0] == 0x20)&&(ipd[1] == 0x01)&&(ipd[2] == 0x04)&&(ipd[3] == 0x70))
  224. return false;
  225. }
  226. return true;
  227. default:
  228. return false;
  229. }
  230. }
  231. return false;
  232. }
  233. /**
  234. * @return Latency or 0xffff if unknown
  235. */
  236. ZT_ALWAYS_INLINE unsigned int latency() const { return _latency; }
  237. /**
  238. * @return Path quality -- lower is better
  239. */
  240. ZT_ALWAYS_INLINE long quality(const int64_t now) const
  241. {
  242. const long l = (long)_latency;
  243. const long age = (long)std::min((long)(now - _lastIn),(long)(ZT_PEER_PING_PERIOD * 10)); // set an upper sanity limit to avoid overflow
  244. return ( ( (age < (ZT_PEER_PING_PERIOD + 5000)) ? l : (l + 65535 + age) ) * (long)((ZT_INETADDRESS_MAX_SCOPE - _ipScope) + 1));
  245. }
  246. /**
  247. * Record statistics on outgoing packets. Used later to estimate QoS metrics.
  248. *
  249. * @param now Current time
  250. * @param packetId ID of packet
  251. * @param payloadLength Length of payload
  252. * @param verb Packet verb
  253. */
  254. ZT_ALWAYS_INLINE void recordOutgoingPacket(int64_t now, int64_t packetId, uint16_t payloadLength, Packet::Verb verb)
  255. {
  256. Mutex::Lock _l(_statistics_m);
  257. if (verb != Packet::VERB_ACK && verb != Packet::VERB_QOS_MEASUREMENT) {
  258. if ((packetId & (ZT_PATH_QOS_ACK_PROTOCOL_DIVISOR - 1)) == 0) {
  259. _unackedBytes += payloadLength;
  260. // Take note that we're expecting a VERB_ACK on this path as of a specific time
  261. _expectingAckAsOf = ackAge(now) > ZT_PATH_ACK_INTERVAL ? _expectingAckAsOf : now;
  262. if (_outQoSRecords.size() < ZT_PATH_MAX_OUTSTANDING_QOS_RECORDS) {
  263. _outQoSRecords[packetId] = now;
  264. }
  265. }
  266. }
  267. }
  268. /**
  269. * Record statistics on incoming packets. Used later to estimate QoS metrics.
  270. *
  271. * @param now Current time
  272. * @param packetId ID of packet
  273. * @param payloadLength Length of payload
  274. * @param verb Packet verb
  275. */
  276. ZT_ALWAYS_INLINE void recordIncomingPacket(int64_t now, int64_t packetId, uint16_t payloadLength, Packet::Verb verb)
  277. {
  278. Mutex::Lock _l(_statistics_m);
  279. if (verb != Packet::VERB_ACK && verb != Packet::VERB_QOS_MEASUREMENT) {
  280. if ((packetId & (ZT_PATH_QOS_ACK_PROTOCOL_DIVISOR - 1)) == 0) {
  281. _inACKRecords[packetId] = payloadLength;
  282. _packetsReceivedSinceLastAck++;
  283. _inQoSRecords[packetId] = now;
  284. _packetsReceivedSinceLastQoS++;
  285. }
  286. _packetValiditySamples.push(true);
  287. }
  288. }
  289. /**
  290. * Record that we've received a VERB_ACK on this path, also compute throughput if required.
  291. *
  292. * @param now Current time
  293. * @param ackedBytes Number of bytes acknowledged by other peer
  294. */
  295. ZT_ALWAYS_INLINE void receivedAck(int64_t now, int32_t ackedBytes)
  296. {
  297. _expectingAckAsOf = 0;
  298. _unackedBytes = (ackedBytes > _unackedBytes) ? 0 : _unackedBytes - ackedBytes;
  299. int64_t timeSinceThroughputEstimate = (now - _lastThroughputEstimation);
  300. if (timeSinceThroughputEstimate >= ZT_PATH_THROUGHPUT_MEASUREMENT_INTERVAL) {
  301. uint64_t throughput = (uint64_t)((float)(_bytesAckedSinceLastThroughputEstimation * 8) / ((float)timeSinceThroughputEstimate / (float)1000));
  302. _throughputSamples.push(throughput);
  303. _maxLifetimeThroughput = throughput > _maxLifetimeThroughput ? throughput : _maxLifetimeThroughput;
  304. _lastThroughputEstimation = now;
  305. _bytesAckedSinceLastThroughputEstimation = 0;
  306. } else {
  307. _bytesAckedSinceLastThroughputEstimation += ackedBytes;
  308. }
  309. }
  310. /**
  311. * @return Number of bytes this peer is responsible for ACKing since last ACK
  312. */
  313. inline int32_t bytesToAck()
  314. {
  315. Mutex::Lock _l(_statistics_m);
  316. int32_t bytesToAck = 0;
  317. std::map<uint64_t,uint16_t>::iterator it = _inACKRecords.begin();
  318. while (it != _inACKRecords.end()) {
  319. bytesToAck += it->second;
  320. it++;
  321. }
  322. return bytesToAck;
  323. }
  324. /**
  325. * @return Number of bytes thus far sent that have not been acknowledged by the remote peer
  326. */
  327. inline int64_t unackedSentBytes()
  328. {
  329. return _unackedBytes;
  330. }
  331. /**
  332. * Account for the fact that an ACK was just sent. Reset counters, timers, and clear statistics buffers
  333. *
  334. * @param Current time
  335. */
  336. inline void sentAck(int64_t now)
  337. {
  338. Mutex::Lock _l(_statistics_m);
  339. _inACKRecords.clear();
  340. _packetsReceivedSinceLastAck = 0;
  341. _lastAck = now;
  342. }
  343. /**
  344. * Receive QoS data, match with recorded egress times from this peer, compute latency
  345. * estimates.
  346. *
  347. * @param now Current time
  348. * @param count Number of records
  349. * @param rx_id table of packet IDs
  350. * @param rx_ts table of holding times
  351. */
  352. inline void receivedQoS(int64_t now, int count, uint64_t *rx_id, uint16_t *rx_ts)
  353. {
  354. Mutex::Lock _l(_statistics_m);
  355. // Look up egress times and compute latency values for each record
  356. std::map<uint64_t,uint64_t>::iterator it;
  357. for (int j=0; j<count; j++) {
  358. it = _outQoSRecords.find(rx_id[j]);
  359. if (it != _outQoSRecords.end()) {
  360. uint16_t rtt = (uint16_t)(now - it->second);
  361. uint16_t rtt_compensated = rtt - rx_ts[j];
  362. uint16_t latency = rtt_compensated / 2;
  363. updateLatency(latency, now);
  364. _outQoSRecords.erase(it);
  365. }
  366. }
  367. }
  368. /**
  369. * Generate the contents of a VERB_QOS_MEASUREMENT packet.
  370. *
  371. * @param now Current time
  372. * @param qosBuffer destination buffer
  373. * @return Size of payload
  374. */
  375. inline int32_t generateQoSPacket(int64_t now, char *qosBuffer)
  376. {
  377. Mutex::Lock _l(_statistics_m);
  378. int32_t len = 0;
  379. std::map<uint64_t,uint64_t>::iterator it = _inQoSRecords.begin();
  380. int i=0;
  381. while (i<_packetsReceivedSinceLastQoS && it != _inQoSRecords.end()) {
  382. uint64_t id = it->first;
  383. memcpy(qosBuffer, &id, sizeof(uint64_t));
  384. qosBuffer+=sizeof(uint64_t);
  385. uint16_t holdingTime = (uint16_t)(now - it->second);
  386. memcpy(qosBuffer, &holdingTime, sizeof(uint16_t));
  387. qosBuffer+=sizeof(uint16_t);
  388. len+=sizeof(uint64_t)+sizeof(uint16_t);
  389. _inQoSRecords.erase(it++);
  390. i++;
  391. }
  392. return len;
  393. }
  394. /**
  395. * Account for the fact that a VERB_QOS_MEASUREMENT was just sent. Reset timers.
  396. *
  397. * @param Current time
  398. */
  399. inline void sentQoS(int64_t now) {
  400. _packetsReceivedSinceLastQoS = 0;
  401. _lastQoSMeasurement = now;
  402. }
  403. /**
  404. * @param now Current time
  405. * @return Whether an ACK (VERB_ACK) packet needs to be emitted at this time
  406. */
  407. inline bool needsToSendAck(int64_t now) {
  408. return ((now - _lastAck) >= ZT_PATH_ACK_INTERVAL ||
  409. (_packetsReceivedSinceLastAck == ZT_PATH_QOS_TABLE_SIZE)) && _packetsReceivedSinceLastAck;
  410. }
  411. /**
  412. * @param now Current time
  413. * @return Whether a QoS (VERB_QOS_MEASUREMENT) packet needs to be emitted at this time
  414. */
  415. inline bool needsToSendQoS(int64_t now) {
  416. return ((_packetsReceivedSinceLastQoS >= ZT_PATH_QOS_TABLE_SIZE) ||
  417. ((now - _lastQoSMeasurement) > ZT_PATH_QOS_INTERVAL)) && _packetsReceivedSinceLastQoS;
  418. }
  419. /**
  420. * How much time has elapsed since we've been expecting a VERB_ACK on this path. This value
  421. * is used to determine a more relevant path "age". This lets us penalize paths which are no
  422. * longer ACKing, but not those that simple aren't being used to carry traffic at the
  423. * current time.
  424. */
  425. inline int64_t ackAge(int64_t now) { return _expectingAckAsOf ? now - _expectingAckAsOf : 0; }
  426. /**
  427. * The maximum observed throughput (in bits/s) for this path
  428. */
  429. inline uint64_t maxLifetimeThroughput() { return _maxLifetimeThroughput; }
  430. /**
  431. * @return The mean throughput (in bits/s) of this link
  432. */
  433. inline uint64_t meanThroughput() { return _lastComputedMeanThroughput; }
  434. /**
  435. * Assign a new relative quality value for this path in the aggregate link
  436. *
  437. * @param rq Quality of this path in comparison to other paths available to this peer
  438. */
  439. inline void updateRelativeQuality(float rq) { _lastComputedRelativeQuality = rq; }
  440. /**
  441. * @return Quality of this path compared to others in the aggregate link
  442. */
  443. inline float relativeQuality() { return _lastComputedRelativeQuality; }
  444. /**
  445. * Assign a new allocation value for this path in the aggregate link
  446. *
  447. * @param allocation Percentage of traffic to be sent over this path to a peer
  448. */
  449. inline void updateComponentAllocationOfAggregateLink(unsigned char allocation) { _lastAllocation = allocation; }
  450. /**
  451. * @return Percentage of traffic allocated to this path in the aggregate link
  452. */
  453. inline unsigned char allocation() { return _lastAllocation; }
  454. /**
  455. * @return Stability estimates can become expensive to compute, we cache the most recent result.
  456. */
  457. inline float lastComputedStability() { return _lastComputedStability; }
  458. /**
  459. * @return A pointer to a cached copy of the human-readable name of the interface this Path's localSocket is bound to
  460. */
  461. inline char *getName() { return _ifname; }
  462. /**
  463. * @return Packet delay variance
  464. */
  465. inline float packetDelayVariance() { return _lastComputedPacketDelayVariance; }
  466. /**
  467. * @return Previously-computed mean latency
  468. */
  469. inline float meanLatency() { return _lastComputedMeanLatency; }
  470. /**
  471. * @return Packet loss rate (PLR)
  472. */
  473. inline float packetLossRatio() { return _lastComputedPacketLossRatio; }
  474. /**
  475. * @return Packet error ratio (PER)
  476. */
  477. inline float packetErrorRatio() { return _lastComputedPacketErrorRatio; }
  478. /**
  479. * Record an invalid incoming packet. This packet failed MAC/compression/cipher checks and will now
  480. * contribute to a Packet Error Ratio (PER).
  481. */
  482. inline void recordInvalidPacket() { _packetValiditySamples.push(false); }
  483. /**
  484. * @return A pointer to a cached copy of the address string for this Path (For debugging only)
  485. */
  486. inline char *getAddressString() { return _addrString; }
  487. /**
  488. * @return The current throughput disturbance coefficient
  489. */
  490. inline float throughputDisturbanceCoefficient() { return _lastComputedThroughputDistCoeff; }
  491. /**
  492. * Compute and cache stability and performance metrics. The resultant stability coefficient is a measure of how "well behaved"
  493. * this path is. This figure is substantially different from (but required for the estimation of the path's overall "quality".
  494. *
  495. * @param now Current time
  496. */
  497. inline void processBackgroundPathMeasurements(const int64_t now)
  498. {
  499. if (now - _lastPathQualityComputeTime > ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
  500. Mutex::Lock _l(_statistics_m);
  501. _lastPathQualityComputeTime = now;
  502. address().toString(_addrString);
  503. _lastComputedMeanLatency = _latencySamples.mean();
  504. _lastComputedPacketDelayVariance = _latencySamples.stddev(); // Similar to "jitter" (SEE: RFC 3393, RFC 4689)
  505. _lastComputedMeanThroughput = (uint64_t)_throughputSamples.mean();
  506. // If no packet validity samples, assume PER==0
  507. _lastComputedPacketErrorRatio = 1 - (_packetValiditySamples.count() ? _packetValiditySamples.mean() : 1);
  508. // Compute path stability
  509. // Normalize measurements with wildly different ranges into a reasonable range
  510. float normalized_pdv = Utils::normalize(_lastComputedPacketDelayVariance, 0, ZT_PATH_MAX_PDV, 0, 10);
  511. float normalized_la = Utils::normalize(_lastComputedMeanLatency, 0, ZT_PATH_MAX_MEAN_LATENCY, 0, 10);
  512. float throughput_cv = _throughputSamples.mean() > 0 ? _throughputSamples.stddev() / _throughputSamples.mean() : 1;
  513. // Form an exponential cutoff and apply contribution weights
  514. float pdv_contrib = expf((-1.0f)*normalized_pdv) * (float)ZT_PATH_CONTRIB_PDV;
  515. float latency_contrib = expf((-1.0f)*normalized_la) * (float)ZT_PATH_CONTRIB_LATENCY;
  516. // Throughput Disturbance Coefficient
  517. float throughput_disturbance_contrib = expf((-1.0f)*throughput_cv) * (float)ZT_PATH_CONTRIB_THROUGHPUT_DISTURBANCE;
  518. _throughputDisturbanceSamples.push(throughput_cv);
  519. _lastComputedThroughputDistCoeff = _throughputDisturbanceSamples.mean();
  520. // Obey user-defined ignored contributions
  521. pdv_contrib = ZT_PATH_CONTRIB_PDV > 0.0 ? pdv_contrib : 1;
  522. latency_contrib = ZT_PATH_CONTRIB_LATENCY > 0.0 ? latency_contrib : 1;
  523. throughput_disturbance_contrib = ZT_PATH_CONTRIB_THROUGHPUT_DISTURBANCE > 0.0 ? throughput_disturbance_contrib : 1;
  524. // Stability
  525. _lastComputedStability = pdv_contrib + latency_contrib + throughput_disturbance_contrib;
  526. _lastComputedStability *= 1 - _lastComputedPacketErrorRatio;
  527. // Prevent QoS records from sticking around for too long
  528. std::map<uint64_t,uint64_t>::iterator it = _outQoSRecords.begin();
  529. while (it != _outQoSRecords.end()) {
  530. // Time since egress of tracked packet
  531. if ((now - it->second) >= ZT_PATH_QOS_TIMEOUT) {
  532. _outQoSRecords.erase(it++);
  533. } else { it++; }
  534. }
  535. }
  536. }
  537. /**
  538. * @return True if this path is alive (receiving data)
  539. */
  540. ZT_ALWAYS_INLINE bool alive(const int64_t now) const { return ((now - _lastIn) < ((ZT_PEER_PING_PERIOD * 2) + 5000)); }
  541. /**
  542. * @return Last time we sent something
  543. */
  544. ZT_ALWAYS_INLINE int64_t lastOut() const { return _lastOut; }
  545. /**
  546. * @return Last time we received anything
  547. */
  548. ZT_ALWAYS_INLINE int64_t lastIn() const { return _lastIn; }
  549. private:
  550. Mutex _statistics_m;
  551. volatile int64_t _lastOut;
  552. volatile int64_t _lastIn;
  553. volatile int64_t _lastPathQualityComputeTime;
  554. int64_t _localSocket;
  555. volatile unsigned int _latency;
  556. InetAddress _addr;
  557. InetAddress::IpScope _ipScope; // memoize this since it's a computed value checked often
  558. AtomicCounter __refCount;
  559. std::map<uint64_t,uint64_t> _outQoSRecords; // id:egress_time
  560. std::map<uint64_t,uint64_t> _inQoSRecords; // id:now
  561. std::map<uint64_t,uint16_t> _inACKRecords; // id:len
  562. int64_t _lastAck;
  563. int64_t _lastThroughputEstimation;
  564. int64_t _lastQoSMeasurement;
  565. int64_t _lastQoSRecordPurge;
  566. int64_t _unackedBytes;
  567. int64_t _expectingAckAsOf;
  568. int16_t _packetsReceivedSinceLastAck;
  569. int16_t _packetsReceivedSinceLastQoS;
  570. uint64_t _maxLifetimeThroughput;
  571. uint64_t _lastComputedMeanThroughput;
  572. uint64_t _bytesAckedSinceLastThroughputEstimation;
  573. float _lastComputedMeanLatency;
  574. float _lastComputedPacketDelayVariance;
  575. float _lastComputedPacketErrorRatio;
  576. float _lastComputedPacketLossRatio;
  577. // cached estimates
  578. float _lastComputedStability;
  579. float _lastComputedRelativeQuality;
  580. float _lastComputedThroughputDistCoeff;
  581. unsigned char _lastAllocation;
  582. // cached human-readable strings for tracing purposes
  583. char _ifname[16];
  584. char _addrString[256];
  585. RingBuffer<uint64_t,ZT_PATH_QUALITY_METRIC_WIN_SZ> _throughputSamples;
  586. RingBuffer<uint32_t,ZT_PATH_QUALITY_METRIC_WIN_SZ> _latencySamples;
  587. RingBuffer<bool,ZT_PATH_QUALITY_METRIC_WIN_SZ> _packetValiditySamples;
  588. RingBuffer<float,ZT_PATH_QUALITY_METRIC_WIN_SZ> _throughputDisturbanceSamples;
  589. };
  590. } // namespace ZeroTier
  591. #endif