  1. /*
  2. * Copyright (c)2013-2020 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2025-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #include <cmath>
  14. #include "../osdep/OSUtils.hpp"
  15. #include "Peer.hpp"
  16. #include "Bond.hpp"
  17. #include "Switch.hpp"
  18. #include "Flow.hpp"
  19. #include "Path.hpp"
  20. namespace ZeroTier {
  21. Bond::Bond(const RuntimeEnvironment *renv, int policy, const SharedPtr<Peer>& peer) :
  22. RR(renv),
  23. _peer(peer),
  24. _qosCutoffCount(0),
  25. _ackCutoffCount(0),
  26. _lastAckRateCheck(0),
  27. _lastQoSRateCheck(0),
  28. _lastQualityEstimation(0),
  29. _lastCheckUserPreferences(0),
  30. _lastBackgroundTaskCheck(0),
  31. _lastBondStatusLog(0),
  32. _lastPathNegotiationReceived(0),
  33. _lastPathNegotiationCheck(0),
  34. _lastSentPathNegotiationRequest(0),
  35. _lastFlowStatReset(0),
  36. _lastFlowExpirationCheck(0),
  37. _lastFlowRebalance(0),
  38. _lastFrame(0),
  39. _lastActiveBackupPathChange(0)
  40. {
  41. setReasonableDefaults(policy, SharedPtr<Bond>(), false);
  42. _policyAlias = BondController::getPolicyStrByCode(policy);
  43. }
  44. Bond::Bond(const RuntimeEnvironment *renv, std::string& basePolicy, std::string& policyAlias, const SharedPtr<Peer>& peer) :
  45. RR(renv),
  46. _policyAlias(policyAlias),
  47. _peer(peer)
  48. {
  49. setReasonableDefaults(BondController::getPolicyCodeByStr(basePolicy), SharedPtr<Bond>(), false);
  50. }
  51. Bond::Bond(const RuntimeEnvironment *renv, SharedPtr<Bond> originalBond, const SharedPtr<Peer>& peer) :
  52. RR(renv),
  53. _peer(peer),
  54. _lastAckRateCheck(0),
  55. _lastQoSRateCheck(0),
  56. _lastQualityEstimation(0),
  57. _lastCheckUserPreferences(0),
  58. _lastBackgroundTaskCheck(0),
  59. _lastBondStatusLog(0),
  60. _lastPathNegotiationReceived(0),
  61. _lastPathNegotiationCheck(0),
  62. _lastFlowStatReset(0),
  63. _lastFlowExpirationCheck(0),
  64. _lastFlowRebalance(0),
  65. _lastFrame(0)
  66. {
  67. setReasonableDefaults(originalBond->_bondingPolicy, originalBond, true);
  68. }
  69. void Bond::nominatePath(const SharedPtr<Path>& path, int64_t now)
  70. {
  71. char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  72. Mutex::Lock _l(_paths_m);
  73. if (!RR->bc->linkAllowed(_policyAlias, getLink(path))) {
  74. return;
  75. }
  76. bool alreadyPresent = false;
  77. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  78. if (path.ptr() == _paths[i].ptr()) {
  79. // Previously encountered path, not notifying bond
  80. alreadyPresent = true;
  81. break;
  82. }
  83. }
  84. if (!alreadyPresent) {
  85. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  86. if (!_paths[i]) {
  87. _paths[i] = path;
  88. sprintf(traceMsg, "%s (bond) Nominating link %s/%s to peer %llx. It has now entered its trial period",
  89. OSUtils::humanReadableTimestamp().c_str(), getLink(path)->ifname().c_str(), pathStr, _peer->_id.address().toInt());
  90. RR->t->bondStateMessage(NULL, traceMsg);
  91. _paths[i]->startTrial(now);
  92. break;
  93. }
  94. }
  95. }
  96. curateBond(now, true);
  97. estimatePathQuality(now);
  98. }
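/**
 * Select an outgoing path for the given flow according to the current bonding policy:
 * active-backup returns the designated active path, broadcast defers to Switch::_trySend(),
 * balance-rr stripes packets across the bonded links, and balance-xor/balance-aware map the
 * flow (when flow hashing is enabled) onto its assigned path. Returns an empty SharedPtr
 * when no suitable bonded path exists yet.
 */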
  99. SharedPtr<Path> Bond::getAppropriatePath(int64_t now, int32_t flowId)
  100. {
  101. Mutex::Lock _l(_paths_m);
  102. /**
  103. * active-backup
  104. */
  105. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  106. if (_abPath) {
  107. return _abPath;
  108. }
  109. }
  110. /**
  111. * broadcast
  112. */
  113. if (_bondingPolicy == ZT_BONDING_POLICY_BROADCAST) {
  114. return SharedPtr<Path>(); // Handled in Switch::_trySend()
  115. }
  116. if (!_numBondedPaths) {
  117. return SharedPtr<Path>(); // No paths assigned to bond yet, cannot balance traffic
  118. }
  119. /**
  120. * balance-rr
  121. */
  122. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
  123. if (!_allowFlowHashing) {
  124. if (_packetsPerLink == 0) {
  125. // Randomly select a path
  126. return _paths[_bondedIdx[_freeRandomByte % _numBondedPaths]]; // TODO: Optimize
  127. }
  128. if (_rrPacketsSentOnCurrLink < _packetsPerLink) {
  129. // Continue to use this link
  130. ++_rrPacketsSentOnCurrLink;
  131. return _paths[_bondedIdx[_rrIdx]];
  132. }
  133. // Reset striping counter
  134. _rrPacketsSentOnCurrLink = 0;
  135. if (_numBondedPaths == 1) {
  136. _rrIdx = 0;
  137. }
  138. else {
  139. int _tempIdx = _rrIdx;
  140. for (int searchCount = 0; searchCount < (_numBondedPaths-1); searchCount++) {
  141. _tempIdx = (_tempIdx == (_numBondedPaths-1)) ? 0 : _tempIdx+1;
  142. if (_bondedIdx[_tempIdx] != ZT_MAX_PEER_NETWORK_PATHS) {
  143. if (_paths[_bondedIdx[_tempIdx]] && _paths[_bondedIdx[_tempIdx]]->eligible(now,_ackSendInterval)) {
  144. _rrIdx = _tempIdx;
  145. break;
  146. }
  147. }
  148. }
  149. }
  150. if (_paths[_bondedIdx[_rrIdx]]) {
  151. return _paths[_bondedIdx[_rrIdx]];
  152. }
  153. }
  154. }
  155. /**
  156. * balance-xor
  157. */
  158. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  159. if (!_allowFlowHashing || flowId == -1) {
  160. // No specific path required for unclassified traffic, send on anything
  161. return _paths[_bondedIdx[_freeRandomByte % _numBondedPaths]]; // TODO: Optimize
  162. }
  163. else if (_allowFlowHashing) {
  164. // TODO: Optimize
  165. Mutex::Lock _l(_flows_m);
  166. SharedPtr<Flow> flow;
  167. if (_flows.count(flowId)) {
  168. flow = _flows[flowId];
  169. flow->updateActivity(now);
  170. }
  171. else {
  172. unsigned char entropy;
  173. Utils::getSecureRandom(&entropy, 1);
  174. flow = createFlow(SharedPtr<Path>(), flowId, entropy, now);
  175. }
  176. if (flow) {
  177. return flow->assignedPath();
  178. }
  179. }
  180. }
  181. return SharedPtr<Path>();
  182. }
  183. void Bond::recordIncomingInvalidPacket(const SharedPtr<Path>& path)
  184. {
  185. //char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  186. //sprintf(traceMsg, "%s (qos) Invalid packet on link %s/%s from peer %llx",
  187. // OSUtils::humanReadableTimestamp().c_str(), getLink(path)->ifname().c_str(), pathStr, _peer->_id.address().toInt());
  188. //RR->t->bondStateMessage(NULL, traceMsg);
  189. Mutex::Lock _l(_paths_m);
  190. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  191. if (_paths[i] == path) {
  192. _paths[i]->packetValiditySamples.push(false);
  193. }
  194. }
  195. }
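/**
 * Account for an outgoing packet: harvest a byte of entropy from the packet ID for path
 * selection, count outgoing frames, remember QoS-eligible packet IDs and their send times
 * so latency can be computed when the QoS reply arrives, and credit the payload length to
 * the packet's flow when flow hashing is enabled.
 */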
  196. void Bond::recordOutgoingPacket(const SharedPtr<Path> &path, const uint64_t packetId,
  197. uint16_t payloadLength, const Packet::Verb verb, const int32_t flowId, int64_t now)
  198. {
  199. //char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  200. //sprintf(traceMsg, "%s (bond) Outgoing packet on link %s/%s to peer %llx",
  201. // OSUtils::humanReadableTimestamp().c_str(), getLink(path)->ifname().c_str(), pathStr, _peer->_id.address().toInt());
  202. //RR->t->bondStateMessage(NULL, traceMsg);
  203. _freeRandomByte += (unsigned char)(packetId >> 8); // Grab entropy to use in path selection logic
  204. if (!_shouldCollectPathStatistics) {
  205. return;
  206. }
  207. bool isFrame = (verb == Packet::VERB_FRAME || verb == Packet::VERB_EXT_FRAME);
  208. bool shouldRecord = (packetId & (ZT_QOS_ACK_DIVISOR - 1)
  209. && (verb != Packet::VERB_ACK)
  210. && (verb != Packet::VERB_QOS_MEASUREMENT));
  211. if (isFrame || shouldRecord) {
  212. Mutex::Lock _l(_paths_m);
  213. if (isFrame) {
  214. ++(path->_packetsOut);
  215. _lastFrame=now;
  216. }
  217. if (shouldRecord) {
  218. path->_unackedBytes += payloadLength;
  219. // Take note that we're expecting a VERB_ACK on this path as of a specific time
  220. if (path->qosStatsOut.size() < ZT_QOS_MAX_OUTSTANDING_RECORDS) {
  221. path->qosStatsOut[packetId] = now;
  222. }
  223. }
  224. }
  225. if (_allowFlowHashing) {
  226. if (_allowFlowHashing && (flowId != ZT_QOS_NO_FLOW)) {
  227. Mutex::Lock _l(_flows_m);
  228. if (_flows.count(flowId)) {
  229. _flows[flowId]->recordOutgoingBytes(payloadLength);
  230. }
  231. }
  232. }
  233. }
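/**
 * Account for an incoming packet: update per-path frame and QoS/ACK bookkeeping used to
 * generate replies, and (for the balance-* policies) learn the flow so that future
 * outgoing packets belonging to it can be steered onto a known path.
 */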
  234. void Bond::recordIncomingPacket(const SharedPtr<Path>& path, uint64_t packetId, uint16_t payloadLength,
  235. Packet::Verb verb, int32_t flowId, int64_t now)
  236. {
  237. //char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  238. //sprintf(traceMsg, "%s (bond) Incoming packet on link %s/%s from peer %llx [id=%llx, len=%d, verb=%d, flowId=%x]",
  239. // OSUtils::humanReadableTimestamp().c_str(), getLink(path)->ifname().c_str(), pathStr, _peer->_id.address().toInt(), packetId, payloadLength, verb, flowId);
  240. //RR->t->bondStateMessage(NULL, traceMsg);
  241. bool isFrame = (verb == Packet::VERB_FRAME || verb == Packet::VERB_EXT_FRAME);
  242. bool shouldRecord = (packetId & (ZT_QOS_ACK_DIVISOR - 1)
  243. && (verb != Packet::VERB_ACK)
  244. && (verb != Packet::VERB_QOS_MEASUREMENT));
  245. if (isFrame || shouldRecord) {
  246. Mutex::Lock _l(_paths_m);
  247. if (isFrame) {
  248. ++(path->_packetsIn);
  249. _lastFrame=now;
  250. }
  251. if (shouldRecord) {
  252. path->ackStatsIn[packetId] = payloadLength;
  253. ++(path->_packetsReceivedSinceLastAck);
  254. path->qosStatsIn[packetId] = now;
  255. ++(path->_packetsReceivedSinceLastQoS);
  256. path->packetValiditySamples.push(true);
  257. }
  258. }
  259. /**
  260. * Learn new flows and pro-actively create entries for them in the bond so
  261. * that the next time we send a packet out that is part of a flow we know
  262. * which path to use.
  263. */
  264. if ((flowId != ZT_QOS_NO_FLOW)
  265. && (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR
  266. || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR
  267. || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE)) {
  268. Mutex::Lock _l(_flows_m);
  269. SharedPtr<Flow> flow;
  270. if (!_flows.count(flowId)) {
  271. flow = createFlow(path, flowId, 0, now);
  272. } else {
  273. flow = _flows[flowId];
  274. }
  275. if (flow) {
  276. flow->recordIncomingBytes(payloadLength);
  277. }
  278. }
  279. }
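/**
 * Process a VERB_QOS_MEASUREMENT reply: match each reported packet ID against our recorded
 * egress times and derive a latency sample of roughly ((now - egressTime) - remoteHoldingTime) / 2
 * for the path's latency sample window.
 */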
  280. void Bond::receivedQoS(const SharedPtr<Path>& path, int64_t now, int count, uint64_t *rx_id, uint16_t *rx_ts)
  281. {
  282. Mutex::Lock _l(_paths_m);
  283. //char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  284. //sprintf(traceMsg, "%s (qos) Received QoS packet sampling %d frames from peer %llx via %s/%s",
  285. // OSUtils::humanReadableTimestamp().c_str(), count, _peer->_id.address().toInt(), getLink(path)->ifname().c_str(), pathStr);
  286. //RR->t->bondStateMessage(NULL, traceMsg);
  287. // Look up egress times and compute latency values for each record
  288. std::map<uint64_t,uint64_t>::iterator it;
  289. for (int j=0; j<count; j++) {
  290. it = path->qosStatsOut.find(rx_id[j]);
  291. if (it != path->qosStatsOut.end()) {
  292. path->latencySamples.push(((uint16_t)(now - it->second) - rx_ts[j]) / 2);
  293. path->qosStatsOut.erase(it);
  294. }
  295. }
  296. path->qosRecordSize.push(count);
  297. }
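/**
 * Process a VERB_ACK from the peer: clear acknowledged bytes from the path's unacked byte
 * counter and periodically fold the acked byte count into a throughput estimate for the path.
 */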
  298. void Bond::receivedAck(const SharedPtr<Path>& path, int64_t now, int32_t ackedBytes)
  299. {
  300. Mutex::Lock _l(_paths_m);
  301. //char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  302. //sprintf(traceMsg, "%s (qos) Received ACK packet for %d bytes from peer %llx via %s/%s",
  303. // OSUtils::humanReadableTimestamp().c_str(), ackedBytes, _peer->_id.address().toInt(), getLink(path)->ifname().c_str(), pathStr);
  304. //RR->t->bondStateMessage(NULL, traceMsg);
  305. path->_lastAckReceived = now;
  306. path->_unackedBytes = (ackedBytes > path->_unackedBytes) ? 0 : path->_unackedBytes - ackedBytes;
  307. int64_t timeSinceThroughputEstimate = (now - path->_lastThroughputEstimation);
  308. if (timeSinceThroughputEstimate >= throughputMeasurementInterval) {
  309. // TODO: See if this floating point math can be reduced
  310. uint64_t throughput = (uint64_t)((float)(path->_bytesAckedSinceLastThroughputEstimation) / ((float)timeSinceThroughputEstimate / (float)1000));
  311. throughput /= 1000;
  312. if (throughput > 0.0) {
  313. path->throughputSamples.push(throughput);
  314. path->_throughputMax = throughput > path->_throughputMax ? throughput : path->_throughputMax;
  315. }
  316. path->_lastThroughputEstimation = now;
  317. path->_bytesAckedSinceLastThroughputEstimation = 0;
  318. } else {
  319. path->_bytesAckedSinceLastThroughputEstimation += ackedBytes;
  320. }
  321. }
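/**
 * Serialize queued QoS records into qosBuffer. Each record is a packet ID (uint64_t)
 * followed by a uint16_t holding time (now minus the packet's receipt time); the number of
 * records is capped by ZT_QOS_TABLE_SIZE. Returns the number of bytes written.
 */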
  322. int32_t Bond::generateQoSPacket(const SharedPtr<Path>& path, int64_t now, char *qosBuffer)
  323. {
  324. int32_t len = 0;
  325. std::map<uint64_t,uint64_t>::iterator it = path->qosStatsIn.begin();
  326. int i=0;
  327. int numRecords = std::min(path->_packetsReceivedSinceLastQoS,ZT_QOS_TABLE_SIZE);
  328. while (i<numRecords && it != path->qosStatsIn.end()) {
  329. uint64_t id = it->first;
  330. memcpy(qosBuffer, &id, sizeof(uint64_t));
  331. qosBuffer+=sizeof(uint64_t);
  332. uint16_t holdingTime = (uint16_t)(now - it->second);
  333. memcpy(qosBuffer, &holdingTime, sizeof(uint16_t));
  334. qosBuffer+=sizeof(uint16_t);
  335. len+=sizeof(uint64_t)+sizeof(uint16_t);
  336. path->qosStatsIn.erase(it++);
  337. ++i;
  338. }
  339. return len;
  340. }
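/**
 * Pick a bonded path for an outgoing flow. balance-xor maps the flow ID onto the bonded set
 * by modulo, while balance-aware makes a weighted-random choice using each path's allocation
 * (or its affinity when a bond-wide underload value is present). Returns false if no bonded
 * path could be found.
 */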
  341. bool Bond::assignFlowToBondedPath(SharedPtr<Flow> &flow, int64_t now)
  342. {
  343. char traceMsg[256];
  344. char curPathStr[128];
  345. unsigned int idx = ZT_MAX_PEER_NETWORK_PATHS;
  346. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR) {
  347. idx = abs((int)(flow->id() % (_numBondedPaths)));
  348. SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _paths[_bondedIdx[idx]]->localSocket());
  349. _paths[_bondedIdx[idx]]->address().toString(curPathStr);
  350. sprintf(traceMsg, "%s (balance-xor) Assigned outgoing flow %x to peer %llx to link %s/%s, %lu active flow(s)",
  351. OSUtils::humanReadableTimestamp().c_str(), flow->id(), _peer->_id.address().toInt(), link->ifname().c_str(), curPathStr, _flows.size());
  352. RR->t->bondStateMessage(NULL, traceMsg);
  353. flow->assignPath(_paths[_bondedIdx[idx]],now);
  354. ++(_paths[_bondedIdx[idx]]->_assignedFlowCount);
  355. }
  356. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  357. unsigned char entropy;
  358. Utils::getSecureRandom(&entropy, 1);
  359. if (_totalBondUnderload) {
  360. entropy %= _totalBondUnderload;
  361. }
  362. if (!_numBondedPaths) {
  363. sprintf(traceMsg, "%s (balance-aware) There are no bonded paths, cannot assign flow %x\n",
  364. OSUtils::humanReadableTimestamp().c_str(), flow->id());
  365. RR->t->bondStateMessage(NULL, traceMsg);
  366. return false;
  367. }
  368. /* Since there may be scenarios where a path is removed before we can re-estimate
  369. relative qualities (and thus allocations) we need to down-modulate the entropy
  370. value that we use to randomly assign among the surviving paths, otherwise we risk
  371. not being able to find a path to assign this flow to. */
  372. int totalIncompleteAllocation = 0;
  373. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  374. if (_paths[i] && _paths[i]->bonded()) {
  375. totalIncompleteAllocation += _paths[i]->_allocation;
  376. }
  377. }
  378. entropy %= totalIncompleteAllocation;
  379. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  380. if (_paths[i] && _paths[i]->bonded()) {
  381. SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
  382. _paths[i]->address().toString(curPathStr);
  383. uint8_t probabilitySegment = (_totalBondUnderload > 0) ? _paths[i]->_affinity : _paths[i]->_allocation;
  384. if (entropy <= probabilitySegment) {
  385. idx = i;
  386. break;
  387. }
  388. entropy -= probabilitySegment;
  389. }
  390. }
  391. if (idx < ZT_MAX_PEER_NETWORK_PATHS) {
  392. if (flow->_assignedPath) {
  393. flow->_previouslyAssignedPath = flow->_assignedPath;
  394. }
  395. flow->assignPath(_paths[idx],now);
  396. ++(_paths[idx]->_assignedFlowCount);
  397. }
  398. else {
  399. fprintf(stderr, "could not assign flow?\n"); exit(0); // TODO: Remove for production
  400. return false;
  401. }
  402. }
  403. flow->assignedPath()->address().toString(curPathStr);
  404. SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, flow->assignedPath()->localSocket());
  405. sprintf(traceMsg, "%s (bond) Assigned outgoing flow %x to peer %llx to link %s/%s, %lu active flow(s)",
  406. OSUtils::humanReadableTimestamp().c_str(), flow->id(), _peer->_id.address().toInt(), link->ifname().c_str(), curPathStr, _flows.size());
  407. RR->t->bondStateMessage(NULL, traceMsg);
  408. return true;
  409. }
  410. SharedPtr<Flow> Bond::createFlow(const SharedPtr<Path> &path, int32_t flowId, unsigned char entropy, int64_t now)
  411. {
  412. char traceMsg[256];
  413. char curPathStr[128];
  414. // ---
  415. if (!_numBondedPaths) {
  416. sprintf(traceMsg, "%s (bond) There are no bonded paths to peer %llx, cannot assign flow %x\n",
  417. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), flowId);
  418. RR->t->bondStateMessage(NULL, traceMsg);
  419. return SharedPtr<Flow>();
  420. }
  421. if (_flows.size() >= ZT_FLOW_MAX_COUNT) {
  422. sprintf(traceMsg, "%s (bond) Maximum number of flows on bond to peer %llx reached (%d), forcibly forgetting oldest flow\n",
  423. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), ZT_FLOW_MAX_COUNT);
  424. RR->t->bondStateMessage(NULL, traceMsg);
  425. forgetFlowsWhenNecessary(0, true, now);
  426. }
  427. SharedPtr<Flow> flow = new Flow(flowId, now);
  428. _flows[flowId] = flow;
  429. /**
  430. * Add a flow with a given Path already provided. This is the case when a packet
  431. * is received on a path but no flow exists, in this case we simply assign the path
  432. * that the remote peer chose for us.
  433. */
  434. if (path) {
  435. flow->assignPath(path,now);
  436. path->address().toString(curPathStr);
  437. path->_assignedFlowCount++;
  438. SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, flow->assignedPath()->localSocket());
  439. sprintf(traceMsg, "%s (bond) Assigned incoming flow %x from peer %llx to link %s/%s, %lu active flow(s)",
  440. OSUtils::humanReadableTimestamp().c_str(), flow->id(), _peer->_id.address().toInt(), link->ifname().c_str(), curPathStr, _flows.size());
  441. RR->t->bondStateMessage(NULL, traceMsg);
  442. }
  443. /**
  444. * Add a flow when no path was provided. This means that it is an outgoing packet
  445. * and that it is up to the local peer to decide how to load-balance its transmission.
  446. */
  447. else if (!path) {
  448. assignFlowToBondedPath(flow, now);
  449. }
  450. return flow;
  451. }
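/**
 * Expire flow records: when an age is given, drop every flow older than that age; when
 * 'oldest' is set, drop only the single oldest flow (used when the flow table reaches
 * ZT_FLOW_MAX_COUNT).
 */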
  452. void Bond::forgetFlowsWhenNecessary(uint64_t age, bool oldest, int64_t now)
  453. {
  454. char traceMsg[256];
  455. std::map<int32_t,SharedPtr<Flow> >::iterator it = _flows.begin();
  456. std::map<int32_t,SharedPtr<Flow> >::iterator oldestFlow = _flows.end();
  457. SharedPtr<Flow> expiredFlow;
  458. if (age) { // Remove by specific age
  459. while (it != _flows.end()) {
  460. if (it->second->age(now) > age) {
  461. sprintf(traceMsg, "%s (bond) Forgetting flow %x between this node and peer %llx, %lu active flow(s)",
  462. OSUtils::humanReadableTimestamp().c_str(), it->first, _peer->_id.address().toInt(), (_flows.size()-1));
  463. RR->t->bondStateMessage(NULL, traceMsg);
  464. it->second->assignedPath()->_assignedFlowCount--;
  465. it = _flows.erase(it);
  466. } else {
  467. ++it;
  468. }
  469. }
  470. }
  471. else if (oldest) { // Remove single oldest by natural expiration
  472. uint64_t maxAge = 0;
  473. while (it != _flows.end()) {
  474. if (it->second->age(now) > maxAge) {
  475. maxAge = it->second->age(now);
  476. oldestFlow = it;
  477. }
  478. ++it;
  479. }
  480. if (oldestFlow != _flows.end()) {
  481. sprintf(traceMsg, "%s (bond) Forgetting oldest flow %x (of age %llu) between this node and peer %llx, %lu active flow(s)",
  482. OSUtils::humanReadableTimestamp().c_str(), oldestFlow->first, oldestFlow->second->age(now), _peer->_id.address().toInt(), (_flows.size()-1));
  483. RR->t->bondStateMessage(NULL, traceMsg);
  484. oldestFlow->second->assignedPath()->_assignedFlowCount--;
  485. _flows.erase(oldestFlow);
  486. }
  487. }
  488. }
  489. void Bond::processIncomingPathNegotiationRequest(uint64_t now, SharedPtr<Path> &path, int16_t remoteUtility)
  490. {
  491. char traceMsg[256];
  492. if (_abLinkSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
  493. return;
  494. }
  495. Mutex::Lock _l(_paths_m);
  496. char pathStr[128];
  497. path->address().toString(pathStr);
  498. if (!_lastPathNegotiationCheck) {
  499. return;
  500. }
  501. SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, path->localSocket());
  502. if (remoteUtility > _localUtility) {
  503. char pathStr[128]; path->address().toString(pathStr);
  504. sprintf(traceMsg, "%s (bond) Peer %llx suggests using alternate link %s/%s. Remote utility (%d) is GREATER than local utility (%d), switching to said link\n",
  505. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), link->ifname().c_str(), pathStr, remoteUtility, _localUtility);
  506. RR->t->bondStateMessage(NULL, traceMsg);
  507. negotiatedPath = path;
  508. }
  509. if (remoteUtility < _localUtility) {
  510. sprintf(traceMsg, "%s (bond) Peer %llx suggests using alternate link %s/%s. Remote utility (%d) is LESS than local utility (%d), not switching\n",
  511. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), link->ifname().c_str(), pathStr, remoteUtility, _localUtility);
  512. RR->t->bondStateMessage(NULL, traceMsg);
  513. }
  514. if (remoteUtility == _localUtility) {
  515. sprintf(traceMsg, "%s (bond) Peer %llx suggests using alternate link %s/%s. Remote utility (%d) is equal to local utility (%d)\n",
  516. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), link->ifname().c_str(), pathStr, remoteUtility, _localUtility);
  517. RR->t->bondStateMessage(NULL, traceMsg);
  518. if (_peer->_id.address().toInt() > RR->node->identity().address().toInt()) {
  519. sprintf(traceMsg, "%s (bond) Agreeing with peer %llx to use alternate link %s/%s\n",
  520. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), link->ifname().c_str(), pathStr);
  521. RR->t->bondStateMessage(NULL, traceMsg);
  522. negotiatedPath = path;
  523. } else {
  524. sprintf(traceMsg, "%s (bond) Ignoring petition from peer %llx to use alternate link %s/%s\n",
  525. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), link->ifname().c_str(), pathStr);
  526. RR->t->bondStateMessage(NULL, traceMsg);
  527. }
  528. }
  529. }
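/**
 * Detect when the two peers are sending and receiving on different links. If so, compute a
 * local utility from the paths' failover scores and petition the remote peer (up to
 * ZT_PATH_NEGOTIATION_TRY_COUNT times) to converge on the link we prefer; if negotiation
 * stalls and the utility difference is zero, adopt the remote peer's choice instead.
 */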
  530. void Bond::pathNegotiationCheck(void *tPtr, const int64_t now)
  531. {
  532. char pathStr[128];
  533. int maxInPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
  534. int maxOutPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
  535. uint64_t maxInCount = 0;
  536. uint64_t maxOutCount = 0;
  537. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  538. if (!_paths[i]) {
  539. continue;
  540. }
  541. if (_paths[i]->_packetsIn > maxInCount) {
  542. maxInCount = _paths[i]->_packetsIn;
  543. maxInPathIdx = i;
  544. }
  545. if (_paths[i]->_packetsOut > maxOutCount) {
  546. maxOutCount = _paths[i]->_packetsOut;
  547. maxOutPathIdx = i;
  548. }
  549. _paths[i]->resetPacketCounts();
  550. }
  551. bool _peerLinksSynchronized = !((maxInPathIdx != ZT_MAX_PEER_NETWORK_PATHS)
  552. && (maxOutPathIdx != ZT_MAX_PEER_NETWORK_PATHS)
  553. && (maxInPathIdx != maxOutPathIdx));
  554. /**
  555. * Determine utility and attempt to petition remote peer to switch to our chosen path
  556. */
  557. if (!_peerLinksSynchronized) {
  558. _localUtility = _paths[maxOutPathIdx]->_failoverScore - _paths[maxInPathIdx]->_failoverScore;
  559. if (_paths[maxOutPathIdx]->_negotiated) {
  560. _localUtility -= ZT_MULTIPATH_FAILOVER_HANDICAP_NEGOTIATED;
  561. }
  562. if ((now - _lastSentPathNegotiationRequest) > ZT_PATH_NEGOTIATION_CUTOFF_TIME) {
  563. //fprintf(stderr, "BT: (sync) it's been long enough, sending more requests.\n");
  564. _numSentPathNegotiationRequests = 0;
  565. }
  566. if (_numSentPathNegotiationRequests < ZT_PATH_NEGOTIATION_TRY_COUNT) {
  567. if (_localUtility >= 0) {
  568. //fprintf(stderr, "BT: (sync) paths appear to be out of sync (utility=%d)\n", _localUtility);
  569. sendPATH_NEGOTIATION_REQUEST(tPtr, _paths[maxOutPathIdx]);
  570. ++_numSentPathNegotiationRequests;
  571. _lastSentPathNegotiationRequest = now;
  572. _paths[maxOutPathIdx]->address().toString(pathStr);
  573. SharedPtr<Link> link =RR->bc->getLinkBySocket(_policyAlias, _paths[maxOutPathIdx]->localSocket());
  574. //fprintf(stderr, "sending request to use %s on %s, ls=%llx, utility=%d\n", pathStr, link->ifname().c_str(), _paths[maxOutPathIdx]->localSocket(), _localUtility);
  575. }
  576. }
  577. /**
  578. * Give up negotiating and consider switching
  579. */
  580. else if ((now - _lastSentPathNegotiationRequest) > (2 * ZT_PATH_NEGOTIATION_CHECK_INTERVAL)) {
  581. if (_localUtility == 0) {
  582. // There's no loss to us, just switch without sending another request
  583. //fprintf(stderr, "BT: (sync) giving up, switching to remote peer's path.\n");
  584. negotiatedPath = _paths[maxInPathIdx];
  585. }
  586. }
  587. }
  588. }
  589. void Bond::sendPATH_NEGOTIATION_REQUEST(void *tPtr, const SharedPtr<Path> &path)
  590. {
  591. char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  592. sprintf(traceMsg, "%s (bond) Sending link negotiation request to peer %llx via link %s/%s, local utility is %d",
  593. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), getLink(path)->ifname().c_str(), pathStr, _localUtility);
  594. RR->t->bondStateMessage(NULL, traceMsg);
  595. if (_abLinkSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
  596. return;
  597. }
  598. Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_PATH_NEGOTIATION_REQUEST);
  599. outp.append<int16_t>(_localUtility);
  600. if (path->address()) {
  601. outp.armor(_peer->key(),false,_peer->aesKeysIfSupported());
  602. RR->node->putPacket(tPtr,path->localSocket(),path->address(),outp.data(),outp.size());
  603. }
  604. }
  605. void Bond::sendACK(void *tPtr, const SharedPtr<Path> &path,const int64_t localSocket,
  606. const InetAddress &atAddress,int64_t now)
  607. {
  608. Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_ACK);
  609. int32_t bytesToAck = 0;
  610. std::map<uint64_t,uint16_t>::iterator it = path->ackStatsIn.begin();
  611. while (it != path->ackStatsIn.end()) {
  612. bytesToAck += it->second;
  613. ++it;
  614. }
  615. //char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  616. //sprintf(traceMsg, "%s (qos) Sending ACK packet for %d bytes to peer %llx via link %s/%s",
  617. // OSUtils::humanReadableTimestamp().c_str(), bytesToAck, _peer->_id.address().toInt(), getLink(path)->ifname().c_str(), pathStr);
  618. //RR->t->bondStateMessage(NULL, traceMsg);
  619. outp.append<uint32_t>(bytesToAck);
  620. if (atAddress) {
  621. outp.armor(_peer->key(),false,_peer->aesKeysIfSupported());
  622. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  623. } else {
  624. RR->sw->send(tPtr,outp,false);
  625. }
  626. path->ackStatsIn.clear();
  627. path->_packetsReceivedSinceLastAck = 0;
  628. path->_lastAckSent = now;
  629. }
  630. void Bond::sendQOS_MEASUREMENT(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,
  631. const InetAddress &atAddress,int64_t now)
  632. {
  633. //char traceMsg[256]; char pathStr[128]; path->address().toString(pathStr);
  634. //sprintf(traceMsg, "%s (qos) Sending QoS packet to peer %llx via link %s/%s",
  635. // OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), getLink(path)->ifname().c_str(), pathStr);
  636. //RR->t->bondStateMessage(NULL, traceMsg);
  637. const int64_t _now = RR->node->now();
  638. Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_QOS_MEASUREMENT);
  639. char qosData[ZT_QOS_MAX_PACKET_SIZE];
  640. int16_t len = generateQoSPacket(path, _now,qosData);
  641. outp.append(qosData,len);
  642. if (atAddress) {
  643. outp.armor(_peer->key(),false,_peer->aesKeysIfSupported());
  644. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  645. } else {
  646. RR->sw->send(tPtr,outp,false);
  647. }
  648. // Account for the fact that a VERB_QOS_MEASUREMENT was just sent. Reset timers.
  649. path->_packetsReceivedSinceLastQoS = 0;
  650. path->_lastQoSMeasurement = now;
  651. }
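/**
 * Periodic maintenance entry point (rate-limited by ZT_BOND_BACKGROUND_TASK_MIN_INTERVAL):
 * refreshes monitoring intervals and user preferences, curates the bond, estimates path
 * quality, emits QoS/ACK packets when statistics collection is enabled, and then runs the
 * policy-specific tasks (active-backup or balance-*).
 */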
  652. void Bond::processBackgroundTasks(void *tPtr, const int64_t now)
  653. {
  654. Mutex::Lock _l(_paths_m);
  655. if (!_peer->_canUseMultipath || (now - _lastBackgroundTaskCheck) < ZT_BOND_BACKGROUND_TASK_MIN_INTERVAL) {
  656. return;
  657. }
  658. _lastBackgroundTaskCheck = now;
  659. // Compute dynamic path monitor timer interval
  660. if (_linkMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC) {
  661. int suggestedMonitorInterval = (now - _lastFrame) / 100;
  662. _dynamicPathMonitorInterval = std::min(ZT_PATH_HEARTBEAT_PERIOD, ((suggestedMonitorInterval > _bondMonitorInterval) ? suggestedMonitorInterval : _bondMonitorInterval));
  663. }
  664. // TODO: Clarify and generalize this logic
  665. if (_linkMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC) {
  666. _shouldCollectPathStatistics = true;
  667. }
  668. // Memoize oft-used properties in the packet ingress/egress logic path
  669. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  670. // Required for real-time balancing
  671. _shouldCollectPathStatistics = true;
  672. }
  673. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  674. if (_abLinkSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_BETTER) {
  675. // Required for judging suitability of primary link after recovery
  676. _shouldCollectPathStatistics = true;
  677. }
  678. if (_abLinkSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
  679. // Required for judging suitability of new candidate primary
  680. _shouldCollectPathStatistics = true;
  681. }
  682. }
  683. if ((now - _lastCheckUserPreferences) > 1000) {
  684. _lastCheckUserPreferences = now;
  685. applyUserPrefs();
  686. }
  687. curateBond(now,false);
  688. if ((now - _lastQualityEstimation) > _qualityEstimationInterval) {
  689. _lastQualityEstimation = now;
  690. estimatePathQuality(now);
  691. }
  692. dumpInfo(now);
  693. // Send QOS/ACK packets as needed
  694. if (_shouldCollectPathStatistics) {
  695. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  696. if (_paths[i] && _paths[i]->allowed()) {
  697. if (_paths[i]->needsToSendQoS(now,_qosSendInterval)) {
  698. sendQOS_MEASUREMENT(tPtr, _paths[i], _paths[i]->localSocket(), _paths[i]->address(), now);
  699. }
  700. if (_paths[i]->needsToSendAck(now,_ackSendInterval)) {
  701. sendACK(tPtr, _paths[i], _paths[i]->localSocket(), _paths[i]->address(), now);
  702. }
  703. }
  704. }
  705. }
  706. // Perform periodic background tasks unique to each bonding policy
  707. switch (_bondingPolicy)
  708. {
  709. case ZT_BONDING_POLICY_ACTIVE_BACKUP:
  710. processActiveBackupTasks(tPtr, now);
  711. break;
  712. case ZT_BONDING_POLICY_BROADCAST:
  713. break;
  714. case ZT_BONDING_POLICY_BALANCE_RR:
  715. case ZT_BONDING_POLICY_BALANCE_XOR:
  716. case ZT_BONDING_POLICY_BALANCE_AWARE:
  717. processBalanceTasks(now);
  718. break;
  719. default:
  720. break;
  721. }
  722. // Check whether or not a path negotiation needs to be performed
  723. if (((now - _lastPathNegotiationCheck) > ZT_PATH_NEGOTIATION_CHECK_INTERVAL) && _allowPathNegotiation) {
  724. _lastPathNegotiationCheck = now;
  725. pathNegotiationCheck(tPtr, now);
  726. }
  727. }
  728. void Bond::applyUserPrefs()
  729. {
  730. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  731. if (!_paths[i]) {
  732. continue;
  733. }
  734. SharedPtr<Link> sl = getLink(_paths[i]);
  735. if (sl) {
  736. if (sl->monitorInterval() == 0) { // If no interval was specified for this link, use the more generic bond-wide interval
  737. sl->setMonitorInterval(_bondMonitorInterval);
  738. }
  739. RR->bc->setMinReqPathMonitorInterval((sl->monitorInterval() < RR->bc->minReqPathMonitorInterval()) ? sl->monitorInterval() : RR->bc->minReqPathMonitorInterval());
  740. bool bFoundCommonLink = false;
  741. SharedPtr<Link> commonLink =RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
  742. for(unsigned int j=0;j<ZT_MAX_PEER_NETWORK_PATHS;++j) {
  743. if (_paths[j] && _paths[j].ptr() != _paths[i].ptr()) {
  744. if (RR->bc->getLinkBySocket(_policyAlias, _paths[j]->localSocket()) == commonLink) {
  745. bFoundCommonLink = true;
  746. }
  747. }
  748. }
  749. _paths[i]->_monitorInterval = sl->monitorInterval();
  750. _paths[i]->_upDelay = sl->upDelay() ? sl->upDelay() : _upDelay;
  751. _paths[i]->_downDelay = sl->downDelay() ? sl->downDelay() : _downDelay;
  752. _paths[i]->_ipvPref = sl->ipvPref();
  753. _paths[i]->_mode = sl->mode();
  754. _paths[i]->_enabled = sl->enabled();
  755. _paths[i]->_onlyPathOnLink = !bFoundCommonLink;
  756. }
  757. }
  758. if (_peer) {
  759. _peer->_shouldCollectPathStatistics = _shouldCollectPathStatistics;
  760. _peer->_bondingPolicy = _bondingPolicy;
  761. }
  762. }
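/**
 * Re-evaluate which paths make up the bond proper: track per-path eligibility changes,
 * update the alive/total link counts and the bond's reported health, and (for the balance-*
 * policies) rebuild the bonded set so that at most one path represents each logical link,
 * preferring user-preferred and higher-ranked paths.
 */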
  763. void Bond::curateBond(const int64_t now, bool rebuildBond)
  764. {
  765. char traceMsg[256];
  766. char pathStr[128];
  767. uint8_t tmpNumAliveLinks = 0;
  768. uint8_t tmpNumTotalLinks = 0;
  769. /**
  770. * Update path states
  771. */
  772. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  773. if (!_paths[i]) {
  774. continue;
  775. }
  776. tmpNumTotalLinks++;
  777. if (_paths[i]->alive(now, true)) {
  778. tmpNumAliveLinks++;
  779. }
  780. bool currEligibility = _paths[i]->eligible(now,_ackSendInterval);
  781. if (currEligibility != _paths[i]->_lastEligibilityState) {
  782. _paths[i]->address().toString(pathStr);
  783. char traceMsg[256]; _paths[i]->address().toString(pathStr);
  784. sprintf(traceMsg, "%s (bond) Eligibility of link %s/%s to peer %llx has changed from %d to %d",
  785. OSUtils::humanReadableTimestamp().c_str(), getLink(_paths[i])->ifname().c_str(), pathStr, _peer->_id.address().toInt(), _paths[i]->_lastEligibilityState, currEligibility);
  786. RR->t->bondStateMessage(NULL, traceMsg);
  787. if (currEligibility) {
  788. rebuildBond = true;
  789. }
  790. if (!currEligibility) {
  791. _paths[i]->adjustRefractoryPeriod(now, _defaultPathRefractoryPeriod, !currEligibility);
  792. if (_paths[i]->bonded()) {
  793. char pathStr[128]; _paths[i]->address().toString(pathStr);
  794. sprintf(traceMsg, "%s (bond) Link %s/%s to peer %llx was bonded, reallocation of its flows will occur soon",
  795. OSUtils::humanReadableTimestamp().c_str(), getLink(_paths[i])->ifname().c_str(), pathStr, _peer->_id.address().toInt());
  796. RR->t->bondStateMessage(NULL, traceMsg);
  797. rebuildBond = true;
  798. _paths[i]->_shouldReallocateFlows = _paths[i]->bonded();
  799. _paths[i]->setBonded(false);
  800. } else {
  801. sprintf(traceMsg, "%s (bond) Link %s/%s to peer %llx was not bonded, no allocation consequences",
  802. OSUtils::humanReadableTimestamp().c_str(), getLink(_paths[i])->ifname().c_str(), pathStr, _peer->_id.address().toInt());
  803. RR->t->bondStateMessage(NULL, traceMsg);
  804. }
  805. }
  806. }
  807. if (currEligibility) {
  808. _paths[i]->adjustRefractoryPeriod(now, _defaultPathRefractoryPeriod, false);
  809. }
  810. _paths[i]->_lastEligibilityState = currEligibility;
  811. }
  812. _numAliveLinks = tmpNumAliveLinks;
  813. _numTotalLinks = tmpNumTotalLinks;
  814. /* Determine health status to report to user */
  815. bool tmpHealthStatus = true;
  816. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  817. if (_numAliveLinks < 2) {
  818. // Considered healthy if there is at least one failover link
  819. tmpHealthStatus = false;
  820. }
  821. }
  822. if (_bondingPolicy == ZT_BONDING_POLICY_BROADCAST) {
  823. if (_numAliveLinks < 1) {
  824. // Considered healthy if we're able to send frames at all
  825. tmpHealthStatus = false;
  826. }
  827. }
  828. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
  829. if (_numAliveLinks < _numTotalLinks) {
  830. // Considered healthy if all known paths are alive; this should be refined to account for user bond config settings
  831. tmpHealthStatus = false;
  832. }
  833. }
  834. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR) {
  835. if (_numAliveLinks < _numTotalLinks) {
  836. // Considered healthy if all known paths are alive; this should be refined to account for user bond config settings
  837. tmpHealthStatus = false;
  838. }
  839. }
  840. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  841. if (_numAliveLinks < _numTotalLinks) {
  842. // Considered healthy if all known paths are alive; this should be refined to account for user bond config settings
  843. tmpHealthStatus = false;
  844. }
  845. }
  846. if (tmpHealthStatus != _isHealthy) {
  847. std::string healthStatusStr;
  848. if (tmpHealthStatus == true) {
  849. healthStatusStr = "HEALTHY";
  850. } else {
  851. healthStatusStr = "DEGRADED";
  852. }
  853. sprintf(traceMsg, "%s (bond) Bond to peer %llx is in a %s state (%d/%d links)",
  854. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), healthStatusStr.c_str(), _numAliveLinks, _numTotalLinks);
  855. RR->t->bondStateMessage(NULL, traceMsg);
  856. }
  857. _isHealthy = tmpHealthStatus;
  858. /**
  859. * Curate the set of paths that are part of the bond proper. Selects a single path
  860. * per logical link according to eligibility and user-specified constraints.
  861. */
  862. if ((_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR)
  863. || (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR)
  864. || (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE)) {
  865. if (!_numBondedPaths) {
  866. rebuildBond = true;
  867. }
  868. // TODO: Optimize
  869. if (rebuildBond) {
  870. int updatedBondedPathCount = 0;
  871. std::map<SharedPtr<Link>,int> linkMap;
  872. for (int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  873. if (_paths[i] && _paths[i]->allowed() && (_paths[i]->eligible(now,_ackSendInterval) || !_numBondedPaths)) {
  874. SharedPtr<Link> link =RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
  875. if (!linkMap.count(link)) {
  876. linkMap[link] = i;
  877. }
  878. else {
  879. bool overridden = false;
  880. _paths[i]->address().toString(pathStr);
  881. //fprintf(stderr, " link representative path already exists! (%s %s)\n", getLink(_paths[i])->ifname().c_str(), pathStr);
  882. if (_paths[i]->preferred() && !_paths[linkMap[link]]->preferred()) {
  883. // Override previous choice if preferred
  884. if (_paths[linkMap[link]]->_assignedFlowCount) {
  885. _paths[linkMap[link]]->_deprecated = true;
  886. }
  887. else {
  888. _paths[linkMap[link]]->_deprecated = true;
  889. _paths[linkMap[link]]->setBonded(false);
  890. }
  891. linkMap[link] = i;
  892. overridden = true;
  893. }
  894. if ((_paths[i]->preferred() && _paths[linkMap[link]]->preferred())
  895. || (!_paths[i]->preferred() && !_paths[linkMap[link]]->preferred())) {
  896. if (_paths[i]->preferenceRank() > _paths[linkMap[link]]->preferenceRank()) {
  897. // Override if higher preference
  898. if (_paths[linkMap[link]]->_assignedFlowCount) {
  899. _paths[linkMap[link]]->_deprecated = true;
  900. }
  901. else {
  902. _paths[linkMap[link]]->_deprecated = true;
  903. _paths[linkMap[link]]->setBonded(false);
  904. }
  905. linkMap[link] = i;
  906. }
  907. }
  908. }
  909. }
  910. }
  911. std::map<SharedPtr<Link>,int>::iterator it = linkMap.begin();
  912. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  913. if (!_paths[i]) {
  914. continue;
  915. }
  916. _bondedIdx[i] = ZT_MAX_PEER_NETWORK_PATHS;
  917. if (it != linkMap.end()) {
  918. _bondedIdx[i] = it->second;
  919. _paths[_bondedIdx[i]]->setBonded(true);
  920. ++it;
  921. ++updatedBondedPathCount;
  922. _paths[_bondedIdx[i]]->address().toString(pathStr);
  923. //fprintf(stderr, "setting i=%d, _bondedIdx[%d]=%d to bonded (%s %s)\n", i, i, _bondedIdx[i], getLink(_paths[_bondedIdx[i]])->ifname().c_str(), pathStr);
  924. }
  925. }
  926. _numBondedPaths = updatedBondedPathCount;
  927. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
  928. // Cause an RR reset since the currently used index might no longer be valid
  929. _rrPacketsSentOnCurrLink = _packetsPerLink;
  930. }
  931. }
  932. }
  933. }
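/**
 * Recompute per-path quality and allocation: smooth the observed latency, delay variance,
 * and error/loss ratios, normalize each against the configured acceptable maximums, weight
 * the components by _qualityWeights, and scale each bonded path's share of total quality
 * to an 8-bit _allocation value used by the balancing logic.
 */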
  934. void Bond::estimatePathQuality(const int64_t now)
  935. {
  936. char pathStr[128];
  937. uint32_t totUserSpecifiedLinkSpeed = 0;
  938. if (_numBondedPaths) { // Compute relative user-specified speeds of links
  939. for(unsigned int i=0;i<_numBondedPaths;++i) {
  940. SharedPtr<Link> link =RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
  941. if (_paths[i] && _paths[i]->allowed()) {
  942. totUserSpecifiedLinkSpeed += link->speed();
  943. }
  944. }
  945. for(unsigned int i=0;i<_numBondedPaths;++i) {
  946. SharedPtr<Link> link =RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
  947. if (_paths[i] && _paths[i]->allowed()) {
  948. link->setRelativeSpeed(round( ((float)link->speed() / (float)totUserSpecifiedLinkSpeed) * 255));
  949. }
  950. }
  951. }
  952. float lat[ZT_MAX_PEER_NETWORK_PATHS];
  953. float pdv[ZT_MAX_PEER_NETWORK_PATHS];
  954. float plr[ZT_MAX_PEER_NETWORK_PATHS];
  955. float per[ZT_MAX_PEER_NETWORK_PATHS];
  956. float maxLAT = 0;
  957. float maxPDV = 0;
  958. float maxPLR = 0;
  959. float maxPER = 0;
  960. float quality[ZT_MAX_PEER_NETWORK_PATHS];
  961. uint8_t alloc[ZT_MAX_PEER_NETWORK_PATHS];
  962. float totQuality = 0.0f;
  963. memset(&lat, 0, sizeof(lat));
  964. memset(&pdv, 0, sizeof(pdv));
  965. memset(&plr, 0, sizeof(plr));
  966. memset(&per, 0, sizeof(per));
  967. memset(&quality, 0, sizeof(quality));
  968. memset(&alloc, 0, sizeof(alloc));
  969. // Compute initial summary statistics
  970. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  971. if (!_paths[i] || !_paths[i]->allowed()) {
  972. continue;
  973. }
  974. // Compute/Smooth average of real-world observations
  975. _paths[i]->_latencyMean = _paths[i]->latencySamples.mean();
  976. _paths[i]->_latencyVariance = _paths[i]->latencySamples.stddev();
  977. _paths[i]->_packetErrorRatio = 1.0 - (_paths[i]->packetValiditySamples.count() ? _paths[i]->packetValiditySamples.mean() : 1.0);
  978. if (userHasSpecifiedLinkSpeeds()) {
  979. // Use user-reported metrics
  980. SharedPtr<Link> link =RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
  981. if (link) {
  982. _paths[i]->_throughputMean = link->speed();
  983. _paths[i]->_throughputVariance = 0;
  984. }
  985. }
  986. // Drain unacknowledged QoS records
  987. std::map<uint64_t,uint64_t>::iterator it = _paths[i]->qosStatsOut.begin();
  988. uint64_t currentLostRecords = 0;
  989. while (it != _paths[i]->qosStatsOut.end()) {
  990. int qosRecordTimeout = 5000; //_paths[i]->monitorInterval() * ZT_MULTIPATH_QOS_ACK_INTERVAL_MULTIPLIER * 8;
  991. if ((now - it->second) >= qosRecordTimeout) {
  992. // Packet was lost
  993. it = _paths[i]->qosStatsOut.erase(it);
  994. ++currentLostRecords;
  995. } else { ++it; }
  996. }
  997. quality[i]=0;
  998. totQuality=0;
  999. // Normalize raw observations according to sane limits and/or user specified values
  1000. lat[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_latencyMean, 0, _maxAcceptableLatency, 0, 1));
  1001. pdv[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_latencyVariance, 0, _maxAcceptablePacketDelayVariance, 0, 1));
  1002. plr[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_packetLossRatio, 0, _maxAcceptablePacketLossRatio, 0, 1));
  1003. per[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_packetErrorRatio, 0, _maxAcceptablePacketErrorRatio, 0, 1));
  1004. // Record bond-wide maximums to determine relative values
  1005. maxLAT = lat[i] > maxLAT ? lat[i] : maxLAT;
  1006. maxPDV = pdv[i] > maxPDV ? pdv[i] : maxPDV;
  1007. maxPLR = plr[i] > maxPLR ? plr[i] : maxPLR;
  1008. maxPER = per[i] > maxPER ? per[i] : maxPER;
  1009. }
  1010. // Convert metrics to relative quantities and apply contribution weights
  1011. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  1012. if (_paths[i] && _paths[i]->bonded()) {
  1013. quality[i] += ((maxLAT > 0.0f ? lat[i] / maxLAT : 0.0f) * _qualityWeights[ZT_QOS_LAT_IDX]);
  1014. quality[i] += ((maxPDV > 0.0f ? pdv[i] / maxPDV : 0.0f) * _qualityWeights[ZT_QOS_PDV_IDX]);
  1015. quality[i] += ((maxPLR > 0.0f ? plr[i] / maxPLR : 0.0f) * _qualityWeights[ZT_QOS_PLR_IDX]);
  1016. quality[i] += ((maxPER > 0.0f ? per[i] / maxPER : 0.0f) * _qualityWeights[ZT_QOS_PER_IDX]);
  1017. totQuality += quality[i];
  1018. }
  1019. }
  1020. // Normalize to 8-bit allocation values
  1021. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  1022. if (_paths[i] && _paths[i]->bonded()) {
  1023. alloc[i] = std::ceil((quality[i] / totQuality) * (float)255);
  1024. _paths[i]->_allocation = alloc[i];
  1025. }
  1026. }
  1027. }
  1028. void Bond::processBalanceTasks(const int64_t now)
  1029. {
  1030. char curPathStr[128];
  1031. // TODO: Generalize
  1032. int totalAllocation = 0;
  1033. for (int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  1034. if (!_paths[i]) {
  1035. continue;
  1036. }
  1037. if (_paths[i] && _paths[i]->bonded() && _paths[i]->eligible(now,_ackSendInterval)) {
  1038. totalAllocation+=_paths[i]->_allocation;
  1039. }
  1040. }
  1041. unsigned char minimumAllocationValue = 0.33 * ((float)totalAllocation / (float)_numBondedPaths);
  1042. if (_allowFlowHashing) {
  1043. /**
  1044. * Clean up and reset flows if necessary
  1045. */
  1046. if ((now - _lastFlowExpirationCheck) > ZT_MULTIPATH_FLOW_CHECK_INTERVAL) {
  1047. Mutex::Lock _l(_flows_m);
  1048. forgetFlowsWhenNecessary(ZT_MULTIPATH_FLOW_EXPIRATION_INTERVAL,false,now);
  1049. _lastFlowExpirationCheck = now;
  1050. }
  1051. if ((now - _lastFlowStatReset) > ZT_FLOW_STATS_RESET_INTERVAL) {
  1052. Mutex::Lock _l(_flows_m);
  1053. _lastFlowStatReset = now;
  1054. std::map<int32_t,SharedPtr<Flow> >::iterator it = _flows.begin();
  1055. while (it != _flows.end()) {
  1056. it->second->resetByteCounts();
  1057. ++it;
  1058. }
  1059. }
  1060. /**
  1061. * Re-allocate flows from dead paths
  1062. */
  1063. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  1064. Mutex::Lock _l(_flows_m);
  1065. for (int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  1066. if (!_paths[i]) {
  1067. continue;
  1068. }
  1069. if (!_paths[i]->eligible(now,_ackSendInterval) && _paths[i]->_shouldReallocateFlows) {
  1070. char traceMsg[256]; char pathStr[128]; _paths[i]->address().toString(pathStr);
  1071. sprintf(traceMsg, "%s (balance-*) Reallocating flows to peer %llx from dead link %s/%s to surviving links",
  1072. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), getLink(_paths[i])->ifname().c_str(), pathStr);
  1073. RR->t->bondStateMessage(NULL, traceMsg);
  1074. std::map<int32_t,SharedPtr<Flow> >::iterator flow_it = _flows.begin();
  1075. while (flow_it != _flows.end()) {
  1076. if (flow_it->second->assignedPath() == _paths[i]) {
  1077. if(assignFlowToBondedPath(flow_it->second, now)) {
  1078. _paths[i]->_assignedFlowCount--;
  1079. }
  1080. }
  1081. ++flow_it;
  1082. }
  1083. _paths[i]->_shouldReallocateFlows = false;
  1084. }
  1085. }
  1086. }
  1087. /**
  1088. * Re-allocate flows from under-performing links
  1089. * NOTE: This could be part of the above block but was kept separate for clarity.
  1090. */
  1091. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  1092. Mutex::Lock _l(_flows_m);
  1093. for (int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  1094. if (!_paths[i]) {
  1095. continue;
  1096. }
  1097. if (_paths[i] && _paths[i]->bonded() && _paths[i]->eligible(now,_ackSendInterval) && (_paths[i]->_allocation < minimumAllocationValue) && _paths[i]->_assignedFlowCount) {
  1098. _paths[i]->address().toString(curPathStr);
  1099. char traceMsg[256]; char pathStr[128]; _paths[i]->address().toString(pathStr);
  1100. sprintf(traceMsg, "%s (balance-*) Reallocating flows to peer %llx from under-performing link %s/%s\n",
  1101. OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), getLink(_paths[i])->ifname().c_str(), pathStr);
  1102. RR->t->bondStateMessage(NULL, traceMsg);
  1103. std::map<int32_t,SharedPtr<Flow> >::iterator flow_it = _flows.begin();
  1104. while (flow_it != _flows.end()) {
  1105. if (flow_it->second->assignedPath() == _paths[i]) {
  1106. if(assignFlowToBondedPath(flow_it->second, now)) {
  1107. _paths[i]->_assignedFlowCount--;
  1108. }
  1109. }
  1110. ++flow_it;
  1111. }
  1112. _paths[i]->_shouldReallocateFlows = false;
  1113. }
  1114. }
  1115. }
  1116. }
  1117. /**
  1118. * Tasks specific to (Balance Round Robin)
  1119. */
  1120. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
  1121. if (_allowFlowHashing) {
  1122. // TODO: Should ideally failover from (idx) to a random link, this is so that (idx+1) isn't overloaded
  1123. }
  1124. else if (!_allowFlowHashing) {
  1125. // Nothing
  1126. }
  1127. }
  1128. /**
  1129. * Tasks specific to (Balance XOR)
  1130. */
  1131. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR) {
  1132. // Nothing specific for XOR
  1133. }
    /**
     * Tasks specific to (Balance Aware)
     */
    if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
        if (_allowFlowHashing) {
            Mutex::Lock _l(_flows_m);
            if (_flowRebalanceStrategy == ZT_MULTIPATH_FLOW_REBALANCE_STRATEGY_PASSIVE) {
                // Do nothing here, this is taken care of in the more general case above.
            }
            if (_flowRebalanceStrategy == ZT_MULTIPATH_FLOW_REBALANCE_STRATEGY_OPPORTUNISTIC) {
                // If the flow is temporarily inactive we should take this opportunity to re-assign the flow if needed.
            }
            if (_flowRebalanceStrategy == ZT_MULTIPATH_FLOW_REBALANCE_STRATEGY_AGGRESSIVE) {
                /**
                 * Return flows to the original path if it has once again become available
                 */
                if ((now - _lastFlowRebalance) > ZT_FLOW_REBALANCE_INTERVAL) {
                    std::map<int32_t,SharedPtr<Flow> >::iterator flow_it = _flows.begin();
                    while (flow_it != _flows.end()) {
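                        // Only return a flow once its previous path is eligible again and shows at
                        // least twice the minimum allocation; presumably the factor of two adds
                        // hysteresis so flows do not bounce back and forth between links.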
                        if (flow_it->second->_previouslyAssignedPath && flow_it->second->_previouslyAssignedPath->eligible(now, _ackSendInterval)
                            && (flow_it->second->_previouslyAssignedPath->_allocation >= (minimumAllocationValue * 2))) {
                            //fprintf(stderr, "moving flow back onto its previous path assignment (based on eligibility)\n");
                            (flow_it->second->_assignedPath->_assignedFlowCount)--;
                            flow_it->second->assignPath(flow_it->second->_previouslyAssignedPath, now);
                            (flow_it->second->_previouslyAssignedPath->_assignedFlowCount)++;
                        }
                        ++flow_it;
                    }
                    _lastFlowRebalance = now;
                }
                /**
                 * Return flows to the original path if it has once again become performant
                 */
                if ((now - _lastFlowRebalance) > ZT_FLOW_REBALANCE_INTERVAL) {
                    std::map<int32_t,SharedPtr<Flow> >::iterator flow_it = _flows.begin();
                    while (flow_it != _flows.end()) {
                        if (flow_it->second->_previouslyAssignedPath && flow_it->second->_previouslyAssignedPath->eligible(now, _ackSendInterval)
                            && (flow_it->second->_previouslyAssignedPath->_allocation >= (minimumAllocationValue * 2))) {
                            //fprintf(stderr, "moving flow back onto its previous path assignment (based on performance)\n");
                            (flow_it->second->_assignedPath->_assignedFlowCount)--;
                            flow_it->second->assignPath(flow_it->second->_previouslyAssignedPath, now);
                            (flow_it->second->_previouslyAssignedPath->_assignedFlowCount)++;
                        }
                        ++flow_it;
                    }
                    _lastFlowRebalance = now;
                }
            }
        }
        else if (!_allowFlowHashing) {
            // Nothing
        }
    }
}
void Bond::dequeueNextActiveBackupPath(const uint64_t now)
{
    if (_abFailoverQueue.empty()) {
        return;
    }
    _abPath = _abFailoverQueue.front();
    _abFailoverQueue.pop_front();
    _lastActiveBackupPathChange = now;
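    // Reset per-path packet counters, presumably so that quality measurements taken after the
    // switch start from a clean slate rather than being skewed by the previous active path.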
    for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
        if (_paths[i]) {
            _paths[i]->resetPacketCounts();
        }
    }
}
void Bond::processActiveBackupTasks(void *tPtr, const int64_t now)
{
    char traceMsg[256];
    char pathStr[128];
    char prevPathStr[128];
    char curPathStr[128];
    SharedPtr<Path> prevActiveBackupPath = _abPath;
    SharedPtr<Path> nonPreferredPath;
    bool bFoundPrimaryLink = false;
    /**
     * Generate periodic status report
     */
    if ((now - _lastBondStatusLog) > ZT_MULTIPATH_BOND_STATUS_INTERVAL) {
        _lastBondStatusLog = now;
        if (_abPath) {
            _abPath->address().toString(curPathStr);
            sprintf(traceMsg, "%s (active-backup) Active link to peer %llx is %s/%s, failover queue size is %zu",
                OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), getLink(_abPath)->ifname().c_str(), curPathStr, _abFailoverQueue.size());
            RR->t->bondStateMessage(NULL, traceMsg);
        } else {
            sprintf(traceMsg, "%s (active-backup) No active link to peer %llx",
                OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt());
            RR->t->bondStateMessage(NULL, traceMsg);
        }
        if (_abFailoverQueue.empty()) {
            sprintf(traceMsg, "%s (active-backup) Failover queue is empty, bond to peer %llx is NOT currently fault-tolerant",
                OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt());
            RR->t->bondStateMessage(NULL, traceMsg);
        }
    }
    /**
     * Select the initial "active" active-backup link
     */
    if (!_abPath) {
        /**
         * [Automatic mode]
         * The user has not explicitly specified links or a failover schedule, so the
         * bonding policy selects the first eligible path and sets it as the active
         * backup path. If a substantially better path is later detected, the policy
         * assigns it as the new active backup path. If the active path fails, the
         * policy simply moves on to the next eligible path.
         */
        if (!userHasSpecifiedLinks()) {
            sprintf(traceMsg, "%s (active-backup) No links to peer %llx specified. Searching...",
                OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt());
            RR->t->bondStateMessage(NULL, traceMsg);
            for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
                if (_paths[i] && _paths[i]->eligible(now, _ackSendInterval)) {
                    _paths[i]->address().toString(curPathStr);
                    SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
                    if (link) {
                        sprintf(traceMsg, "%s (active-backup) Found eligible link %s/%s to peer %llx",
                            OSUtils::humanReadableTimestamp().c_str(), getLink(_paths[i])->ifname().c_str(), curPathStr, _peer->_id.address().toInt());
                        RR->t->bondStateMessage(NULL, traceMsg);
                    }
                    _abPath = _paths[i];
                    break;
                }
            }
        }
        /**
         * [Manual mode]
         * The user has specified links or failover rules that the bonding policy should adhere to.
         */
        else if (userHasSpecifiedLinks()) {
            if (userHasSpecifiedPrimaryLink()) {
                //sprintf(traceMsg, "%s (active-backup) Checking local.conf for user-specified primary link\n", OSUtils::humanReadableTimestamp().c_str());
                for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
                    if (!_paths[i]) {
                        continue;
                    }
                    SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
                    if (_paths[i]->eligible(now, _ackSendInterval) && link->primary()) {
                        if (!_paths[i]->preferred()) {
                            _paths[i]->address().toString(curPathStr);
                            // Found a path on the primary link; take note in case we don't find a preferred path
                            nonPreferredPath = _paths[i];
                            bFoundPrimaryLink = true;
                        }
                        if (_paths[i]->preferred()) {
                            _abPath = _paths[i];
                            _abPath->address().toString(curPathStr);
                            SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
                            bFoundPrimaryLink = true;
                            break; // Found a preferred path on the primary link
                        }
                    }
                }
                if (_abPath) {
                    _abPath->address().toString(curPathStr);
                    SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _abPath->localSocket());
                    if (link) {
                        sprintf(traceMsg, "%s (active-backup) Found preferred primary link %s/%s to peer %llx",
                            OSUtils::humanReadableTimestamp().c_str(), getLink(_abPath)->ifname().c_str(), curPathStr, _peer->_id.address().toInt());
                        RR->t->bondStateMessage(NULL, traceMsg);
                    }
                }
                else {
                    if (bFoundPrimaryLink && nonPreferredPath) {
                        sprintf(traceMsg, "%s (active-backup) Found non-preferred primary link to peer %llx",
                            OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt());
                        RR->t->bondStateMessage(NULL, traceMsg);
                        _abPath = nonPreferredPath;
                    }
                }
                if (!_abPath) {
                    sprintf(traceMsg, "%s (active-backup) Designated primary link to peer %llx is not yet ready",
                        OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt());
                    RR->t->bondStateMessage(NULL, traceMsg);
                    // TODO: Should wait for some time (failover interval?) and then switch to a spare link
                }
            }
            else if (!userHasSpecifiedPrimaryLink()) {
                int _abIdx = ZT_MAX_PEER_NETWORK_PATHS;
                sprintf(traceMsg, "%s (active-backup) User did not specify a primary link to peer %llx, selecting first available link",
                    OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt());
                RR->t->bondStateMessage(NULL, traceMsg);
                for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
                    if (_paths[i] && _paths[i]->eligible(now, _ackSendInterval)) {
                        _abIdx = i;
                        break;
                    }
                }
                if (_abIdx == ZT_MAX_PEER_NETWORK_PATHS) {
                    // Unable to find a candidate next-best, no change
                }
                else {
                    _abPath = _paths[_abIdx];
                    SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _abPath->localSocket());
                    if (link) {
                        _abPath->address().toString(curPathStr);
                        sprintf(traceMsg, "%s (active-backup) Selected non-primary link %s/%s to peer %llx",
                            OSUtils::humanReadableTimestamp().c_str(), getLink(_abPath)->ifname().c_str(), curPathStr, _peer->_id.address().toInt());
                        RR->t->bondStateMessage(NULL, traceMsg);
                    }
                }
            }
        }
    }
    /**
     * Update and maintain the active-backup failover queue
     */
    if (_abPath) {
        // Don't worry about the failover queue until we have an active link
        // Remove ineligible paths from the failover link queue
        for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();) {
            if ((*it) && !(*it)->eligible(now, _ackSendInterval)) {
                (*it)->address().toString(curPathStr);
                SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, (*it)->localSocket());
                it = _abFailoverQueue.erase(it);
                if (link) {
                    sprintf(traceMsg, "%s (active-backup) Link %s/%s to peer %llx is now ineligible, removing from failover queue, there are %zu links in the queue",
                        OSUtils::humanReadableTimestamp().c_str(), link->ifname().c_str(), curPathStr, _peer->_id.address().toInt(), _abFailoverQueue.size());
                    RR->t->bondStateMessage(NULL, traceMsg);
                }
            } else {
                ++it;
            }
        }
        /**
         * Failover instructions were provided by the user; build the queue according to those,
         * as well as IPv preference, disregarding performance.
         */
        if (userHasSpecifiedFailoverInstructions()) {
            /**
             * Clear failover scores
             */
            for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
                if (_paths[i]) {
                    _paths[i]->_failoverScore = 0;
                }
            }
            // Follow user-specified failover instructions
            for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
                if (!_paths[i] || !_paths[i]->allowed() || !_paths[i]->eligible(now, _ackSendInterval)) {
                    continue;
                }
                SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _paths[i]->localSocket());
                _paths[i]->address().toString(pathStr);
                int failoverScoreHandicap = _paths[i]->_failoverScore;
                if (_paths[i]->preferred()) {
                    failoverScoreHandicap += ZT_MULTIPATH_FAILOVER_HANDICAP_PREFERRED;
                }
                if (link->primary()) {
                    // Boost the score of paths on the user-designated primary link
                    failoverScoreHandicap += ZT_MULTIPATH_FAILOVER_HANDICAP_PRIMARY;
                }
                if (!_paths[i]->_failoverScore) {
                    // If we didn't inherit a failover score from a "parent" that wants to use this path as a failover
                    int newHandicap = failoverScoreHandicap ? failoverScoreHandicap : _paths[i]->_allocation;
                    _paths[i]->_failoverScore = newHandicap;
                }
                SharedPtr<Link> failoverLink;
                if (link->failoverToLink().length()) {
                    failoverLink = RR->bc->getLinkByName(_policyAlias, link->failoverToLink());
                }
                if (failoverLink) {
                    for (int j=0; j<ZT_MAX_PEER_NETWORK_PATHS; j++) {
                        if (_paths[j] && getLink(_paths[j]) == failoverLink.ptr()) {
                            _paths[j]->address().toString(pathStr);
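                            // The designated failover target inherits a slightly lower score (-10) than
                            // the link that points to it, so it ranks just below that link in the queue.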
                            int inheritedHandicap = failoverScoreHandicap - 10;
                            int newHandicap = _paths[j]->_failoverScore > inheritedHandicap ? _paths[j]->_failoverScore : inheritedHandicap;
                            if (!_paths[j]->preferred()) {
                                newHandicap--;
                            }
                            _paths[j]->_failoverScore = newHandicap;
                        }
                    }
                }
                if (_paths[i].ptr() != _abPath.ptr()) {
                    bool bFoundPathInQueue = false;
                    for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end(); ++it) {
                        if (_paths[i].ptr() == (*it).ptr()) {
                            bFoundPathInQueue = true;
                        }
                    }
                    if (!bFoundPathInQueue) {
                        _abFailoverQueue.push_front(_paths[i]);
                        _paths[i]->address().toString(curPathStr);
                        sprintf(traceMsg, "%s (active-backup) Added link %s/%s to peer %llx to failover queue, there are %zu links in the queue",
                            OSUtils::humanReadableTimestamp().c_str(), getLink(_paths[i])->ifname().c_str(), curPathStr, _peer->_id.address().toInt(), _abFailoverQueue.size());
                        RR->t->bondStateMessage(NULL, traceMsg);
                    }
                }
            }
        }
        /**
         * No failover instructions provided by user, build queue according to performance
         * and IPv preference.
         */
        else if (!userHasSpecifiedFailoverInstructions()) {
            for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
                if (!_paths[i]
                    || !_paths[i]->allowed()
                    || !_paths[i]->eligible(now, _ackSendInterval)) {
                    continue;
                }
                int failoverScoreHandicap = 0;
                if (_paths[i]->preferred()) {
                    failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_PREFERRED;
                }
                bool includeRefractoryPeriod = true;
                if (!_paths[i]->eligible(now, includeRefractoryPeriod)) {
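                    // Strongly demote paths that are still within their refractory period so they
                    // sink to the back of the failover queue.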
                    failoverScoreHandicap = -10000;
                }
                if (getLink(_paths[i])->primary() && _abLinkSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
                    // Unless using the "optimize" link selection method, give links marked as primary a score boost
                    failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_PRIMARY;
                }
                if (_paths[i].ptr() == negotiatedPath.ptr()) {
                    _paths[i]->_negotiated = true;
                    failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_NEGOTIATED;
                } else {
                    _paths[i]->_negotiated = false;
                }
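                // Final score: measured allocation plus any handicap for preferred, primary, or
                // negotiated status, so those paths outrank others of similar measured quality.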
                _paths[i]->_failoverScore = _paths[i]->_allocation + failoverScoreHandicap;
                if (_paths[i].ptr() != _abPath.ptr()) {
                    bool bFoundPathInQueue = false;
                    for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end(); ++it) {
                        if (_paths[i].ptr() == (*it).ptr()) {
                            bFoundPathInQueue = true;
                        }
                    }
                    if (!bFoundPathInQueue) {
                        _abFailoverQueue.push_front(_paths[i]);
                        _paths[i]->address().toString(curPathStr);
                        sprintf(traceMsg, "%s (active-backup) Added link %s/%s to peer %llx, there are %zu links in the queue",
                            OSUtils::humanReadableTimestamp().c_str(), getLink(_paths[i])->ifname().c_str(), curPathStr, _peer->_id.address().toInt(), _abFailoverQueue.size());
                        RR->t->bondStateMessage(NULL, traceMsg);
                    }
                }
            }
        }
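        // Keep the queue ordered by quality so that front() always holds the next-best candidate
        // for dequeueNextActiveBackupPath() to promote.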
        _abFailoverQueue.sort(PathQualityComparator());
    }
    /**
     * Short-circuit if we have no queued paths
     */
    if (_abFailoverQueue.empty()) {
        return;
    }
    /**
     * Fulfill primary reselect obligations
     */
    if (_abPath && !_abPath->eligible(now, _ackSendInterval)) { // Implicit ZT_MULTIPATH_RESELECTION_POLICY_FAILURE
        _abPath->address().toString(curPathStr);
        sprintf(traceMsg, "%s (active-backup) Link %s/%s to peer %llx has failed. Selecting new link from failover queue, there are %zu links in the queue",
            OSUtils::humanReadableTimestamp().c_str(), getLink(_abPath)->ifname().c_str(), curPathStr, _peer->_id.address().toInt(), _abFailoverQueue.size());
        RR->t->bondStateMessage(NULL, traceMsg);
        if (!_abFailoverQueue.empty()) {
            dequeueNextActiveBackupPath(now);
            _abPath->address().toString(curPathStr);
            sprintf(traceMsg, "%s (active-backup) Active link to peer %llx has been switched to %s/%s",
                OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt(), getLink(_abPath)->ifname().c_str(), curPathStr);
            RR->t->bondStateMessage(NULL, traceMsg);
        } else {
            sprintf(traceMsg, "%s (active-backup) Failover queue is empty. No links to peer %llx to choose from",
                OSUtils::humanReadableTimestamp().c_str(), _peer->_id.address().toInt());
            RR->t->bondStateMessage(NULL, traceMsg);
        }
    }
    /**
     * Detect a change to prevent flapping during the later optimization step.
     */
    if (prevActiveBackupPath != _abPath) {
        _lastActiveBackupPathChange = now;
    }
    if (_abLinkSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_ALWAYS) {
        if (_abPath && !getLink(_abPath)->primary()
            && getLink(_abFailoverQueue.front())->primary()) {
            dequeueNextActiveBackupPath(now);
            _abPath->address().toString(curPathStr);
            sprintf(traceMsg, "%s (active-backup) Switching back to available primary link %s/%s to peer %llx [linkSelectionMethod = always]",
                OSUtils::humanReadableTimestamp().c_str(), getLink(_abPath)->ifname().c_str(), curPathStr, _peer->_id.address().toInt());
            RR->t->bondStateMessage(NULL, traceMsg);
        }
    }
    if (_abLinkSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_BETTER) {
        if (_abPath && !getLink(_abPath)->primary()) {
            // Switch back to the primary link if it is available and has a better failover score
            if (getLink(_abFailoverQueue.front())->primary()
                && (_abFailoverQueue.front()->_failoverScore > _abPath->_failoverScore)) {
                dequeueNextActiveBackupPath(now);
                _abPath->address().toString(curPathStr);
                sprintf(traceMsg, "%s (active-backup) Switching back to user-defined primary link %s/%s to peer %llx [linkSelectionMethod = better]",
                    OSUtils::humanReadableTimestamp().c_str(), getLink(_abPath)->ifname().c_str(), curPathStr, _peer->_id.address().toInt());
                RR->t->bondStateMessage(NULL, traceMsg);
            }
        }
    }
    if (_abLinkSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE && !_abFailoverQueue.empty()) {
        /**
         * Implement previously-decided link negotiation
         */
        if (_abFailoverQueue.front()->_negotiated) {
            dequeueNextActiveBackupPath(now);
            _abPath->address().toString(prevPathStr);
            _lastPathNegotiationCheck = now;
            _abPath->address().toString(curPathStr);
            sprintf(traceMsg, "%s (active-backup) Switching to negotiated link %s/%s to peer %llx [linkSelectionMethod = optimize]",
                OSUtils::humanReadableTimestamp().c_str(), getLink(_abPath)->ifname().c_str(), curPathStr, _peer->_id.address().toInt());
            RR->t->bondStateMessage(NULL, traceMsg);
        }
        else {
            // Try to find a better path and automatically switch to it -- not too often, though.
            if ((now - _lastActiveBackupPathChange) > ZT_MULTIPATH_MIN_ACTIVE_BACKUP_AUTOFLOP_INTERVAL) {
                if (!_abFailoverQueue.empty()) {
                    int newFScore = _abFailoverQueue.front()->_failoverScore;
                    int prevFScore = _abPath->_failoverScore;
                    // Establish a minimum switch threshold to prevent flapping
                    int failoverScoreDifference = _abFailoverQueue.front()->_failoverScore - _abPath->_failoverScore;
                    int thresholdQuantity = (int)(ZT_MULTIPATH_ACTIVE_BACKUP_OPTIMIZE_MIN_THRESHOLD * (float)_abPath->_allocation);
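                    // Require the candidate to beat the current path by a fraction of the current
                    // path's allocation; healthier active links therefore demand a larger score
                    // improvement before a switch is made.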
                    if ((failoverScoreDifference > 0) && (failoverScoreDifference > thresholdQuantity)) {
                        SharedPtr<Path> oldPath = _abPath;
                        _abPath->address().toString(prevPathStr);
                        dequeueNextActiveBackupPath(now);
                        _abPath->address().toString(curPathStr);
                        sprintf(traceMsg, "%s (active-backup) Switching from %s/%s (fscore=%d) to better link %s/%s (fscore=%d) for peer %llx [linkSelectionMethod = optimize]",
                            OSUtils::humanReadableTimestamp().c_str(), getLink(oldPath)->ifname().c_str(), prevPathStr, prevFScore, getLink(_abPath)->ifname().c_str(), curPathStr, newFScore, _peer->_id.address().toInt());
                        RR->t->bondStateMessage(NULL, traceMsg);
                    }
                }
            }
        }
    }
}
void Bond::setReasonableDefaults(int policy, SharedPtr<Bond> templateBond, bool useTemplate)
{
    // If an invalid bonding policy was given, fall back to the default
    int _defaultBondingPolicy = BondController::defaultBondingPolicy();
    if (policy <= ZT_BONDING_POLICY_NONE || policy > ZT_BONDING_POLICY_BALANCE_AWARE) {
        // If no valid default is set either, use NONE (effectively disabling this bond)
        if (_defaultBondingPolicy < ZT_BONDING_POLICY_NONE || _defaultBondingPolicy > ZT_BONDING_POLICY_BALANCE_AWARE) {
            _bondingPolicy = ZT_BONDING_POLICY_NONE;
        } else {
            _bondingPolicy = _defaultBondingPolicy;
        }
    } else {
        _bondingPolicy = policy;
    }
    _freeRandomByte = 0;
    _userHasSpecifiedPrimaryLink = false;
    _userHasSpecifiedFailoverInstructions = false;
    _isHealthy = false;
    _numAliveLinks = 0;
    _numTotalLinks = 0;
    _downDelay = 0;
    _upDelay = 0;
    _allowFlowHashing = false;
    _bondMonitorInterval = 0;
    _shouldCollectPathStatistics = false;
    // Path negotiation
    _allowPathNegotiation = false;
    _pathNegotiationCutoffCount = 0;
    _localUtility = 0;
    _numBondedPaths = 0;
    _rrPacketsSentOnCurrLink = 0;
    _rrIdx = 0;
    _totalBondUnderload = 0;
    _maxAcceptableLatency = 100;
    _maxAcceptablePacketDelayVariance = 50;
    _maxAcceptablePacketLossRatio = 0.10;
    _maxAcceptablePacketErrorRatio = 0.10;
    _userHasSpecifiedLinkSpeeds = 0;
    /* ZT_MULTIPATH_FLOW_REBALANCE_STRATEGY_PASSIVE is the most conservative strategy and is
       least likely to cause unexpected behavior */
    _flowRebalanceStrategy = ZT_MULTIPATH_FLOW_REBALANCE_STRATEGY_AGGRESSIVE;
    /**
     * Paths are actively monitored to provide a real-time quality/preference-ordered rapid failover queue.
     */
    switch (policy) {
        case ZT_BONDING_POLICY_ACTIVE_BACKUP:
            _failoverInterval = 500;
            _abLinkSelectMethod = ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE;
            _linkMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
            _qualityWeights[ZT_QOS_LAT_IDX] = 0.2f;
            _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
            _qualityWeights[ZT_QOS_PLR_IDX] = 0.2f;
            _qualityWeights[ZT_QOS_PER_IDX] = 0.2f;
            _qualityWeights[ZT_QOS_THR_IDX] = 0.2f;
            _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
            break;
        /**
         * All seemingly-alive paths are used. Paths are not actively monitored.
         */
        case ZT_BONDING_POLICY_BROADCAST:
            _downDelay = 30000;
            _upDelay = 0;
            break;
        /**
         * Paths are monitored to determine when/if one needs to be added or removed from the rotation
         */
        case ZT_BONDING_POLICY_BALANCE_RR:
            _failoverInterval = 3000;
            _allowFlowHashing = false;
            _packetsPerLink = 1024;
            _linkMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
            _qualityWeights[ZT_QOS_LAT_IDX] = 0.4f;
            _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
            _qualityWeights[ZT_QOS_PLR_IDX] = 0.1f;
            _qualityWeights[ZT_QOS_PER_IDX] = 0.1f;
            _qualityWeights[ZT_QOS_THR_IDX] = 0.1f;
            _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
            break;
        /**
         * Path monitoring is used to determine the capacity of each
         * path and where to place the next flow.
         */
        case ZT_BONDING_POLICY_BALANCE_XOR:
            _failoverInterval = 3000;
            _upDelay = _bondMonitorInterval * 2;
            _allowFlowHashing = true;
            _linkMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
            _qualityWeights[ZT_QOS_LAT_IDX] = 0.4f;
            _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
            _qualityWeights[ZT_QOS_PLR_IDX] = 0.1f;
            _qualityWeights[ZT_QOS_PER_IDX] = 0.1f;
            _qualityWeights[ZT_QOS_THR_IDX] = 0.1f;
            _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
            break;
        /**
         * Path monitoring is used to determine the capacity of each
         * path and where to place the next flow. Additionally, re-shuffling
         * of flows may take place.
         */
        case ZT_BONDING_POLICY_BALANCE_AWARE:
            _failoverInterval = 3000;
            _allowFlowHashing = true;
            _linkMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
            _qualityWeights[ZT_QOS_LAT_IDX] = 0.4f;
            _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_PDV_IDX] = 0.4f;
            _qualityWeights[ZT_QOS_PLR_IDX] = 0.2f;
            _qualityWeights[ZT_QOS_PER_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_THR_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
            _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
            break;
        default:
            break;
    }
    /* If a user has specified custom parameters for this bonding policy, overlay
       them onto the defaults that were previously set */
    if (useTemplate) {
        _policyAlias = templateBond->_policyAlias;
        _failoverInterval = templateBond->_failoverInterval;
        _downDelay = templateBond->_downDelay;
        _upDelay = templateBond->_upDelay;
        if (templateBond->_linkMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_PASSIVE
            && templateBond->_failoverInterval != 0) {
            //fprintf(stderr, "warning: passive path monitoring was specified, this will prevent failovers from happening in a timely manner.\n");
        }
        _abLinkSelectMethod = templateBond->_abLinkSelectMethod;
        memcpy(_qualityWeights, templateBond->_qualityWeights, ZT_QOS_WEIGHT_SIZE * sizeof(float));
    }
    /* Set timer geometries */
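    /* For example, the active-backup default of _failoverInterval = 500 ms works out to a
       monitor interval of ~166 ms, an ACK interval of 500 ms, a quality estimation interval
       of 1000 ms, and a QoS send interval of ~664 ms (all derived from the ratios below). */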
    _bondMonitorInterval = _failoverInterval / 3;
    BondController::setMinReqPathMonitorInterval(_bondMonitorInterval);
    _ackSendInterval = _failoverInterval;
    _qualityEstimationInterval = _failoverInterval * 2;
    _dynamicPathMonitorInterval = 0;
    _ackCutoffCount = 0;
    _qosSendInterval = _bondMonitorInterval * 4;
    _qosCutoffCount = 0;
    throughputMeasurementInterval = _ackSendInterval * 2;
    _defaultPathRefractoryPeriod = 8000;
}
void Bond::setUserQualityWeights(float weights[], int len)
{
    if (len == ZT_QOS_WEIGHT_SIZE) {
        float weightTotal = 0.0;
        for (unsigned int i=0; i<ZT_QOS_WEIGHT_SIZE; ++i) {
            weightTotal += weights[i];
        }
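        // Accept the user-supplied weights only if they sum to approximately 1.0 (allowing for
        // floating-point rounding); otherwise the policy defaults are kept.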
        if (weightTotal > 0.99 && weightTotal < 1.01) {
            memcpy(_qualityWeights, weights, len * sizeof(float));
        }
    }
}
bool Bond::relevant()
{
    return false;
}
SharedPtr<Link> Bond::getLink(const SharedPtr<Path>& path)
{
    return RR->bc->getLinkBySocket(_policyAlias, path->localSocket());
}
void Bond::dumpInfo(const int64_t now)
{
    // Omitted
}
} // namespace ZeroTier