package queue

import (
	"time"

	"github.com/gravitl/netmaker/logger"
	"github.com/gravitl/netmaker/logic"
	"github.com/gravitl/netmaker/models"
	"github.com/gravitl/netmaker/netclient/ncutils"
)

// updateNodeMetrics merges the metrics just reported by currentNode with the
// previously stored metrics, remaps ext client entries to their client IDs,
// accumulates uptime/connectivity stats per peer, and records peers that need
// failover. It returns true when the failover state changed and peers should
// be updated.
func updateNodeMetrics(currentNode *models.Node, newMetrics *models.Metrics) bool {
	if newMetrics.FailoverPeers == nil {
		newMetrics.FailoverPeers = make(map[string]string)
	}
	oldMetrics, err := logic.GetMetrics(currentNode.ID.String())
	if err != nil {
		logger.Log(1, "error finding old metrics for node", currentNode.ID.String())
		return false
	}
	if oldMetrics.FailoverPeers == nil {
		oldMetrics.FailoverPeers = make(map[string]string)
	}

	// collect ext clients attached to this node if it is an ingress gateway
	var attachedClients []models.ExtClient
	if currentNode.IsIngressGateway {
		clients, err := logic.GetExtClientsByID(currentNode.ID.String(), currentNode.Network)
		if err == nil {
			attachedClients = clients
		}
	}
	if len(attachedClients) > 0 {
		// associate ext clients with IDs
		for i := range attachedClients {
			extMetric := newMetrics.Connectivity[attachedClients[i].PublicKey]
			if len(extMetric.NodeName) == 0 &&
				len(newMetrics.Connectivity[attachedClients[i].ClientID].NodeName) > 0 { // cover server clients
				extMetric = newMetrics.Connectivity[attachedClients[i].ClientID]
				if extMetric.TotalReceived > 0 && extMetric.TotalSent > 0 {
					extMetric.Connected = true
				}
			}
			extMetric.NodeName = attachedClients[i].ClientID
			delete(newMetrics.Connectivity, attachedClients[i].PublicKey)
			newMetrics.Connectivity[attachedClients[i].ClientID] = extMetric
		}
	}

	// run through metrics for each peer
	for k := range newMetrics.Connectivity {
		currMetric := newMetrics.Connectivity[k]
		oldMetric := oldMetrics.Connectivity[k]
		currMetric.TotalTime += oldMetric.TotalTime
		currMetric.Uptime += oldMetric.Uptime // get the total uptime for this connection
		if currMetric.Uptime == 0 || currMetric.TotalTime == 0 {
			currMetric.PercentUp = 0
		} else {
			currMetric.PercentUp = 100.0 * (float64(currMetric.Uptime) / float64(currMetric.TotalTime))
		}
		totalUpMinutes := currMetric.Uptime * ncutils.CheckInInterval
		currMetric.ActualUptime = time.Duration(totalUpMinutes) * time.Minute
		delete(oldMetrics.Connectivity, k) // remove from old data
		newMetrics.Connectivity[k] = currMetric
	}

	// add nodes that need failover
	nodes, err := logic.GetNetworkNodes(currentNode.Network)
	if err != nil {
		logger.Log(0, "failed to retrieve nodes while updating metrics")
		return false
	}
	for _, node := range nodes {
		if !newMetrics.Connectivity[node.ID.String()].Connected &&
			len(newMetrics.Connectivity[node.ID.String()].NodeName) > 0 &&
			node.Connected &&
			len(node.FailoverNode) > 0 &&
			!node.Failover {
			newMetrics.FailoverPeers[node.ID.String()] = node.FailoverNode.String()
		}
	}

	// determine whether the failover set changed, carrying forward old entries
	shouldUpdate := len(oldMetrics.FailoverPeers) == 0 && len(newMetrics.FailoverPeers) > 0
	for k, v := range oldMetrics.FailoverPeers {
		if len(newMetrics.FailoverPeers[k]) > 0 && len(v) == 0 {
			shouldUpdate = true
		}
		if len(v) > 0 && len(newMetrics.FailoverPeers[k]) == 0 {
			newMetrics.FailoverPeers[k] = v
		}
	}
	for k := range oldMetrics.Connectivity { // cleanup any left over data, self healing
		delete(newMetrics.Connectivity, k)
	}
	return shouldUpdate
}
 |