@@ -373,6 +373,7 @@ SharedPtr<Path> Bond::getAppropriatePath(int64_t now, int32_t flowId)
      */
     if (_policy == ZT_BOND_POLICY_ACTIVE_BACKUP) {
         if (_abPathIdx != ZT_MAX_PEER_NETWORK_PATHS && _paths[_abPathIdx].p) {
+            //fprintf(stderr, "trying to send via (_abPathIdx=%d) %s\n", _abPathIdx, pathToStr(_paths[_abPathIdx].p).c_str());
             return _paths[_abPathIdx].p;
         }
     }
@@ -1032,6 +1033,13 @@ void Bond::curateBond(int64_t now, bool rebuildBond)
         bool satisfiedUpDelay = (now - _paths[i].lastAliveToggle) >= _upDelay;
         // How long since the last QoS was received (Must be less than ZT_PEER_PATH_EXPIRATION since the remote peer's _qosSendInterval isn't known)
         bool acceptableQoSAge = (_paths[i].lastQoSReceived == 0 && inTrial) || ((now - _paths[i].lastQoSReceived) < ZT_PEER_EXPIRED_PATH_TRIAL_PERIOD);
+
+        // Allow active-backup to operate without the receipt of QoS records
+        // This may be expanded to the other modes as an option
+        if (_policy == ZT_BOND_POLICY_ACTIVE_BACKUP) {
+            acceptableQoSAge = true;
+        }
+
         currEligibility = _paths[i].allowed() && ((acceptableAge && satisfiedUpDelay && acceptableQoSAge) || inTrial);

         if (currEligibility) {
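Note: the eligibility rule this hunk changes, reduced to a standalone predicate. This is a minimal sketch, not the real Bond class: the struct fields, enum, and parameters are illustrative stand-ins for the members and macros used above, and the only point it demonstrates is that under active-backup a missing or stale QoS record no longer makes a path ineligible.

```cpp
// Sketch only; names are placeholders for the Bond members used in the hunk above.
#include <cstdint>

enum class Policy { ActiveBackup, BalanceRR, BalanceXOR, BalanceAware };

struct PathState {
    bool allowed;             // stands in for _paths[i].allowed()
    int64_t lastAliveToggle;  // stands in for _paths[i].lastAliveToggle
    int64_t lastQoSReceived;  // 0 means no QoS record has been received yet
};

bool isEligible(const PathState& p, Policy policy, int64_t now, int64_t upDelay,
                int64_t qosTrialPeriod, bool acceptableAge, bool inTrial)
{
    bool satisfiedUpDelay = (now - p.lastAliveToggle) >= upDelay;
    bool acceptableQoSAge = (p.lastQoSReceived == 0 && inTrial) || ((now - p.lastQoSReceived) < qosTrialPeriod);
    if (policy == Policy::ActiveBackup) {
        // The change above: active-backup tolerates absent QoS records entirely.
        acceptableQoSAge = true;
    }
    return p.allowed && ((acceptableAge && satisfiedUpDelay && acceptableQoSAge) || inTrial);
}
```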
@@ -1043,12 +1051,11 @@ void Bond::curateBond(int64_t now, bool rebuildBond)
          */
         if (currEligibility != _paths[i].eligible) {
             if (currEligibility == 0) {
-                log("link %s is no longer eligible", pathToStr(_paths[i].p).c_str());
+                log("link %s is no longer eligible (reason: allowed=%d, age=%d, ud=%d, qos=%d, trial=%d)", pathToStr(_paths[i].p).c_str(), _paths[i].allowed(), acceptableAge, satisfiedUpDelay, acceptableQoSAge, inTrial);
             }
             if (currEligibility == 1) {
                 log("link %s is eligible", pathToStr(_paths[i].p).c_str());
             }
-            debug("\t[%d] allowed=%d, age=%d, qa=%d, ud=%d, trial=%d", i, _paths[i].allowed(), acceptableAge, acceptableQoSAge, satisfiedUpDelay, inTrial);
             dumpPathStatus(now, i);
             if (currEligibility) {
                 rebuildBond = true;
@@ -1094,6 +1101,7 @@ void Bond::curateBond(int64_t now, bool rebuildBond)
      * Curate the set of paths that are part of the bond proper. Select a set of paths
      * per logical link according to eligibility and user-specified constraints.
      */
+    int updatedBondedPathCount = 0;
     if ((_policy == ZT_BOND_POLICY_BALANCE_RR) || (_policy == ZT_BOND_POLICY_BALANCE_XOR) || (_policy == ZT_BOND_POLICY_BALANCE_AWARE)) {
         if (! _numBondedPaths) {
             rebuildBond = true;
@@ -1105,7 +1113,6 @@ void Bond::curateBond(int64_t now, bool rebuildBond)
                 _paths[i].bonded = false;
             }

-            int updatedBondedPathCount = 0;
             // Build map associating paths with local physical links. Will be selected from in next step
             std::map<SharedPtr<Link>, std::vector<int> > linkMap;
             for (int i = 0; i < ZT_MAX_PEER_NETWORK_PATHS; ++i) {
@@ -1207,6 +1214,14 @@ void Bond::curateBond(int64_t now, bool rebuildBond)
             }
         }
     }
+    if (_policy == ZT_BOND_POLICY_ACTIVE_BACKUP) {
+        for (int i = 0; i < ZT_MAX_PEER_NETWORK_PATHS; ++i) {
+            if (_paths[i].p && _paths[i].bonded) {
+                updatedBondedPathCount++;
+            }
+        }
+        _numBondedPaths = updatedBondedPathCount;
+    }
 }

 void Bond::estimatePathQuality(int64_t now)
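Note: what the new active-backup branch at the end of curateBond computes, in isolation. The balance-* branches already maintained `_numBondedPaths`; this hunk gives active-backup the same accounting. The container and field names below are placeholders, not the real Bond members.

```cpp
// Sketch of the bonded-path count added for active-backup; types are illustrative.
#include <vector>

struct PathSlot {
    bool present;  // stands in for _paths[i].p being non-null
    bool bonded;   // stands in for _paths[i].bonded
};

// Equivalent of the loop in the hunk above: count slots holding a bonded path.
int countBondedPaths(const std::vector<PathSlot>& paths)
{
    int updatedBondedPathCount = 0;
    for (const PathSlot& slot : paths) {
        if (slot.present && slot.bonded) {
            ++updatedBondedPathCount;
        }
    }
    return updatedBondedPathCount;  // the hunk assigns this to _numBondedPaths
}
```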
@@ -1488,7 +1503,8 @@ void Bond::processActiveBackupTasks(void* tPtr, int64_t now)
 {
     int prevActiveBackupPathIdx = _abPathIdx;
     int nonPreferredPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
-    bool bFoundPrimaryLink = false;
+    bool foundPathOnPrimaryLink = false;
+    bool foundPreferredPath = false;

     if (_abPathIdx != ZT_MAX_PEER_NETWORK_PATHS && ! _paths[_abPathIdx].p) {
         _abPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
@@ -1551,15 +1567,16 @@ void Bond::processActiveBackupTasks(void* tPtr, int64_t now)
                     if (! _paths[i].preferred()) {
                         // Found path on primary link, take note in case we don't find a preferred path
                         nonPreferredPathIdx = i;
-                        bFoundPrimaryLink = true;
+                        foundPathOnPrimaryLink = true;
                     }
                     if (_paths[i].preferred()) {
                         _abPathIdx = i;
-                        bFoundPrimaryLink = true;
+                        foundPathOnPrimaryLink = true;
                         if (_paths[_abPathIdx].p) {
                             SharedPtr<Link> abLink = RR->bc->getLinkBySocket(_policyAlias, _paths[_abPathIdx].p->localSocket());
                             if (abLink) {
-                                log("found preferred primary link %s", pathToStr(_paths[_abPathIdx].p).c_str());
+                                log("found preferred primary link (_abPathIdx=%d), %s", _abPathIdx, pathToStr(_paths[_abPathIdx].p).c_str());
+                                foundPreferredPath = true;
                             }
                             break; // Found preferred path on primary link
                         }
@@ -1567,8 +1584,8 @@ void Bond::processActiveBackupTasks(void* tPtr, int64_t now)
                 }
             }
         }
-        if (bFoundPrimaryLink && (nonPreferredPathIdx != ZT_MAX_PEER_NETWORK_PATHS)) {
-            log("found non-preferred primary link");
+        if (!foundPreferredPath && foundPathOnPrimaryLink && (nonPreferredPathIdx != ZT_MAX_PEER_NETWORK_PATHS)) {
+            log("found non-preferred primary link (_abPathIdx=%d)", _abPathIdx);
             _abPathIdx = nonPreferredPathIdx;
         }
     }
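Note: the selection order tightened by the two hunks above, in isolation. Previously the non-preferred fallback could overwrite `_abPathIdx` even when a preferred path on the primary link had already been chosen; the new `foundPreferredPath` flag restricts the fallback to the case where no preferred path was found. The sketch below uses plain placeholder structs, not ZeroTier types.

```cpp
// Sketch of the corrected preference order; types are illustrative placeholders.
#include <vector>

struct CandidatePath {
    bool onPrimaryLink;
    bool preferred;
};

// Returns the index to use as the active path, or -1 if nothing on the primary link qualifies.
// A preferred path wins outright; a non-preferred path is only a fallback.
int choosePrimaryLinkPath(const std::vector<CandidatePath>& paths)
{
    int nonPreferredIdx = -1;
    for (int i = 0; i < static_cast<int>(paths.size()); ++i) {
        if (! paths[i].onPrimaryLink) {
            continue;
        }
        if (paths[i].preferred) {
            return i;         // analogous to setting _abPathIdx and breaking out
        }
        nonPreferredIdx = i;  // remembered in case no preferred path turns up
    }
    return nonPreferredIdx;   // used only when no preferred path was found
}
```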
@@ -1606,10 +1623,10 @@ void Bond::processActiveBackupTasks(void* tPtr, int64_t now)
         }
         if (_paths[(*it)].p && ! _paths[(*it)].eligible) {
             SharedPtr<Link> link = RR->bc->getLinkBySocket(_policyAlias, _paths[(*it)].p->localSocket());
-            it = _abFailoverQueue.erase(it);
             if (link) {
-                log("link %s is ineligible, removing from failover queue (%zu links remain in queue)", pathToStr(_paths[_abPathIdx].p).c_str(), _abFailoverQueue.size());
+                log("link %s is ineligible, removing from failover queue (%zu links remain in queue)", pathToStr(_paths[(*it)].p).c_str(), _abFailoverQueue.size());
             }
+            it = _abFailoverQueue.erase(it);
             continue;
         }
         else {
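Note: the iterator discipline this hunk restores. The old code erased the queue entry before logging and logged `_paths[_abPathIdx]` (the currently active path) rather than the path actually being removed; the fix reads through `*it` while it is still valid, then erases and continues from the iterator `erase()` returns. A generic sketch of that pattern, assuming a std::deque of path indices as the surrounding code suggests:

```cpp
// Generic log-then-erase while iterating; the container choice is an assumption.
#include <cstdio>
#include <deque>
#include <vector>

void pruneIneligible(std::deque<int>& failoverQueue, const std::vector<bool>& eligible)
{
    for (std::deque<int>::iterator it = failoverQueue.begin(); it != failoverQueue.end();) {
        if (! eligible[*it]) {
            // Use *it while the iterator is still valid, as the reordered hunk does.
            std::printf("path %d is ineligible, removing from failover queue\n", *it);
            it = failoverQueue.erase(it);  // erase() returns the next valid iterator
            continue;
        }
        ++it;
    }
}
```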
@@ -1676,9 +1693,9 @@ void Bond::processActiveBackupTasks(void* tPtr, int64_t now)
                     }
                 }
                 if (! bFoundPathInQueue) {
-                    _abFailoverQueue.push_front(i);
+                    _abFailoverQueue.push_back(i);
                     log("add link %s to failover queue (%zu links in queue)", pathToStr(_paths[i].p).c_str(), _abFailoverQueue.size());
-                    addPathToBond(0, i);
+                    addPathToBond(i, 0);
                 }
             }
         }
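Note: the ordering change made here and repeated in the matching hunk below. `push_front()` let every newly discovered path jump to the head of the failover queue; `push_back()` keeps earlier candidates ahead of later ones. (The `addPathToBond` argument swap is taken as given from the patch; its signature isn't shown in this diff.) A small sketch of the duplicate check plus FIFO insertion, with the queue assumed to be a std::deque of path indices:

```cpp
// Sketch of duplicate-checked FIFO insertion into the failover queue; types assumed.
#include <algorithm>
#include <deque>

void enqueueFailoverCandidate(std::deque<int>& failoverQueue, int pathIdx)
{
    // Simplified analogue of the bFoundPathInQueue check in the hunk above.
    bool alreadyQueued = std::find(failoverQueue.begin(), failoverQueue.end(), pathIdx) != failoverQueue.end();
    if (! alreadyQueued) {
        failoverQueue.push_back(pathIdx);  // was push_front(): new candidates no longer pre-empt existing ones
    }
}
```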
@@ -1726,13 +1743,14 @@ void Bond::processActiveBackupTasks(void* tPtr, int64_t now)
                     }
                 }
                 if (! bFoundPathInQueue) {
-                    _abFailoverQueue.push_front(i);
+                    _abFailoverQueue.push_back(i);
                     log("add link %s to failover queue (%zu links in queue)", pathToStr(_paths[i].p).c_str(), _abFailoverQueue.size());
-                    addPathToBond(0, i);
+                    addPathToBond(i, 0);
                 }
             }
         }
     }
+    /*
     // Sort queue based on performance
     if (! _abFailoverQueue.empty()) {
         for (int i = 0; i < _abFailoverQueue.size(); i++) {
@@ -1744,7 +1762,7 @@ void Bond::processActiveBackupTasks(void* tPtr, int64_t now)
             }
             _abFailoverQueue[hole_position] = value_to_insert;
         }
-    }
+    }*/

     /**
      * Short-circuit if we have no queued paths
@@ -1894,7 +1912,7 @@ void Bond::setBondParameters(int policy, SharedPtr<Bond> templateBond, bool useT
      * Policy defaults
      */
     _abPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
-    _abLinkSelectMethod = ZT_BOND_RESELECTION_POLICY_OPTIMIZE;
+    _abLinkSelectMethod = ZT_BOND_RESELECTION_POLICY_ALWAYS;
     _rrPacketsSentOnCurrLink = 0;
     _rrIdx = 0;
     _packetsPerLink = 64;
@@ -2013,7 +2031,8 @@ void Bond::dumpInfo(int64_t now, bool force)
     _lastSummaryDump = now;
     float overhead = (_overheadBytes / (timeSinceLastDump / 1000.0f) / 1000.0f);
     _overheadBytes = 0;
-    log("bond: bp=%d, fi=%" PRIu64 ", mi=%d, ud=%d, dd=%d, flows=%zu, leaf=%d, overhead=%f KB/s, links=(%d/%d)",
+    log("bond: ready=%d, bp=%d, fi=%" PRIu64 ", mi=%d, ud=%d, dd=%d, flows=%zu, leaf=%d, overhead=%f KB/s, links=(%d/%d)",
+        isReady(),
         _policy,
         _failoverInterval,
         _monitorInterval,