|
@@ -2,12 +2,10 @@ package functions
|
|
|
|
|
|
import (
|
|
|
"context"
|
|
|
- "encoding/json"
|
|
|
"errors"
|
|
|
"fmt"
|
|
|
"os"
|
|
|
"os/signal"
|
|
|
- "runtime"
|
|
|
"strings"
|
|
|
"sync"
|
|
|
"syscall"
|
|
@@ -19,14 +17,11 @@ import (
|
|
|
"github.com/gravitl/netmaker/netclient/auth"
|
|
|
"github.com/gravitl/netmaker/netclient/config"
|
|
|
"github.com/gravitl/netmaker/netclient/daemon"
|
|
|
- "github.com/gravitl/netmaker/netclient/local"
|
|
|
"github.com/gravitl/netmaker/netclient/ncutils"
|
|
|
"github.com/gravitl/netmaker/netclient/wireguard"
|
|
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
|
|
)
|
|
|
|
|
|
-// == Message Caches ==
|
|
|
-// var keepalive = new(sync.Map)
|
|
|
var messageCache = new(sync.Map)
|
|
|
var networkcontext = new(sync.Map)
|
|
|
|
|
@@ -38,63 +33,159 @@ type cachedMessage struct {
|
|
|
LastSeen time.Time
|
|
|
}
|
|
|
|
|
|
-func insert(network, which, cache string) {
|
|
|
- var newMessage = cachedMessage{
|
|
|
- Message: cache,
|
|
|
- LastSeen: time.Now(),
|
|
|
+// Daemon runs netclient daemon from command line
|
|
|
+func Daemon() error {
|
|
|
+ // == initial pull of all networks ==
|
|
|
+ networks, _ := ncutils.GetSystemNetworks()
|
|
|
+ for _, network := range networks {
|
|
|
+ var cfg config.ClientConfig
|
|
|
+ cfg.Network = network
|
|
|
+ cfg.ReadConfig()
|
|
|
+ initialPull(cfg.Network)
|
|
|
}
|
|
|
- messageCache.Store(fmt.Sprintf("%s%s", network, which), newMessage)
|
|
|
-}
|
|
|
|
|
|
-func read(network, which string) string {
|
|
|
- val, isok := messageCache.Load(fmt.Sprintf("%s%s", network, which))
|
|
|
- if isok {
|
|
|
- var readMessage = val.(cachedMessage) // fetch current cached message
|
|
|
- if readMessage.LastSeen.IsZero() {
|
|
|
- return ""
|
|
|
- }
|
|
|
- if time.Now().After(readMessage.LastSeen.Add(time.Minute * 10)) { // check if message has been there over a minute
|
|
|
- messageCache.Delete(fmt.Sprintf("%s%s", network, which)) // remove old message if expired
|
|
|
- return ""
|
|
|
+ // == get all the comms networks on machine ==
|
|
|
+ commsNetworks, err := getCommsNetworks(networks[:])
|
|
|
+ if err != nil {
|
|
|
+ return errors.New("no comm networks exist")
|
|
|
+ }
|
|
|
+
|
|
|
+ // == subscribe to all nodes on each comms network on machine ==
|
|
|
+ for currCommsNet := range commsNetworks {
|
|
|
+ ncutils.PrintLog("started comms network daemon, "+currCommsNet, 1)
|
|
|
+ ctx, cancel := context.WithCancel(context.Background())
|
|
|
+ networkcontext.Store(currCommsNet, cancel)
|
|
|
+ go messageQueue(ctx, currCommsNet)
|
|
|
+ }
|
|
|
+
|
|
|
+ // == add waitgroup and cancel for checkin routine ==
|
|
|
+ wg := sync.WaitGroup{}
|
|
|
+ ctx, cancel := context.WithCancel(context.Background())
|
|
|
+ wg.Add(1)
|
|
|
+ go Checkin(ctx, &wg, commsNetworks)
|
|
|
+ quit := make(chan os.Signal, 1)
|
|
|
+ signal.Notify(quit, syscall.SIGTERM, os.Interrupt, os.Kill)
|
|
|
+ <-quit
|
|
|
+ for currCommsNet := range commsNetworks {
|
|
|
+ if cancel, ok := networkcontext.Load(currCommsNet); ok {
|
|
|
+ cancel.(context.CancelFunc)()
|
|
|
}
|
|
|
- return readMessage.Message // return current message if not expired
|
|
|
}
|
|
|
- return ""
|
|
|
+ cancel()
|
|
|
+ ncutils.Log("shutting down netclient daemon")
|
|
|
+ wg.Wait()
|
|
|
+ ncutils.Log("shutdown complete")
|
|
|
+ return nil
|
|
|
}
|
|
|
|
|
|
-// == End Message Caches ==
|
|
|
+// UpdateKeys -- updates private key and returns new publickey
|
|
|
+func UpdateKeys(nodeCfg *config.ClientConfig, client mqtt.Client) error {
|
|
|
+ ncutils.Log("received message to update wireguard keys for network " + nodeCfg.Network)
|
|
|
+ key, err := wgtypes.GeneratePrivateKey()
|
|
|
+ if err != nil {
|
|
|
+ ncutils.Log("error generating privatekey " + err.Error())
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ file := ncutils.GetNetclientPathSpecific() + nodeCfg.Node.Interface + ".conf"
|
|
|
+ if err := wireguard.UpdatePrivateKey(file, key.String()); err != nil {
|
|
|
+ ncutils.Log("error updating wireguard key " + err.Error())
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ if storeErr := wireguard.StorePrivKey(key.String(), nodeCfg.Network); storeErr != nil {
|
|
|
+ ncutils.Log("failed to save private key" + storeErr.Error())
|
|
|
+ return storeErr
|
|
|
+ }
|
|
|
|
|
|
-// Daemon runs netclient daemon from command line
|
|
|
-func Daemon() error {
|
|
|
- networks, err := ncutils.GetSystemNetworks()
|
|
|
+ nodeCfg.Node.PublicKey = key.PublicKey().String()
|
|
|
+ var commsCfg = getCommsCfgByNode(&nodeCfg.Node)
|
|
|
+ PublishNodeUpdate(&commsCfg, nodeCfg)
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// PingServer -- checks if server is reachable
|
|
|
+// use commsCfg only
|
|
|
+func PingServer(commsCfg *config.ClientConfig) error {
|
|
|
+ node := getServerAddress(commsCfg)
|
|
|
+ pinger, err := ping.NewPinger(node)
|
|
|
if err != nil {
|
|
|
return err
|
|
|
}
|
|
|
- for _, network := range networks {
|
|
|
- ctx, cancel := context.WithCancel(context.Background())
|
|
|
- networkcontext.Store(network, cancel)
|
|
|
- go MessageQueue(ctx, network)
|
|
|
+ pinger.Timeout = 2 * time.Second
|
|
|
+ pinger.Run()
|
|
|
+ stats := pinger.Statistics()
|
|
|
+ if stats.PacketLoss == 100 {
|
|
|
+ return errors.New("ping error")
|
|
|
}
|
|
|
- quit := make(chan os.Signal, 1)
|
|
|
- signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
|
|
|
- <-quit
|
|
|
- for _, network := range networks {
|
|
|
- if cancel, ok := networkcontext.Load(network); ok {
|
|
|
- cancel.(context.CancelFunc)()
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// == Private ==
|
|
|
+
|
|
|
+// sets MQ client subscriptions for a specific node config
|
|
|
+// should be called for each node belonging to a given comms network
|
|
|
+func setSubscriptions(client mqtt.Client, nodeCfg *config.ClientConfig) {
|
|
|
+ if nodeCfg.DebugOn {
|
|
|
+ if token := client.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil {
|
|
|
+ ncutils.Log(token.Error().Error())
|
|
|
+ return
|
|
|
}
|
|
|
+ ncutils.Log("subscribed to all topics for debugging purposes")
|
|
|
}
|
|
|
- ncutils.Log("all done")
|
|
|
- return nil
|
|
|
|
|
|
+ if token := client.Subscribe(fmt.Sprintf("update/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID), 0, mqtt.MessageHandler(NodeUpdate)); token.Wait() && token.Error() != nil {
|
|
|
+ ncutils.Log(token.Error().Error())
|
|
|
+ return
|
|
|
+ }
|
|
|
+ if nodeCfg.DebugOn {
|
|
|
+ ncutils.Log(fmt.Sprintf("subscribed to node updates for node %s update/%s/%s", nodeCfg.Node.Name, nodeCfg.Node.Network, nodeCfg.Node.ID))
|
|
|
+ }
|
|
|
+ if token := client.Subscribe(fmt.Sprintf("peers/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID), 0, mqtt.MessageHandler(UpdatePeers)); token.Wait() && token.Error() != nil {
|
|
|
+ ncutils.Log(token.Error().Error())
|
|
|
+ return
|
|
|
+ }
|
|
|
+ if nodeCfg.DebugOn {
|
|
|
+ ncutils.Log(fmt.Sprintf("subscribed to peer updates for node %s peers/%s/%s", nodeCfg.Node.Name, nodeCfg.Node.Network, nodeCfg.Node.ID))
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
-// SetupMQTT creates a connection to broker and return client
|
|
|
-func SetupMQTT(cfg *config.ClientConfig, publish bool) mqtt.Client {
|
|
|
+// on a delete usually, pass in the nodecfg to unsubscribe client broker communications
|
|
|
+// for the node in nodeCfg
|
|
|
+func unsubscribeNode(client mqtt.Client, nodeCfg *config.ClientConfig) {
|
|
|
+ client.Unsubscribe(fmt.Sprintf("update/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID))
|
|
|
+ var ok = true
|
|
|
+ if token := client.Unsubscribe(fmt.Sprintf("update/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID)); token.Wait() && token.Error() != nil {
|
|
|
+ ncutils.PrintLog("unable to unsubscribe from updates for node "+nodeCfg.Node.Name+"\n"+token.Error().Error(), 1)
|
|
|
+ ok = false
|
|
|
+ }
|
|
|
+ if token := client.Unsubscribe(fmt.Sprintf("peers/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID)); token.Wait() && token.Error() != nil {
|
|
|
+ ncutils.PrintLog("unable to unsubscribe from peer updates for node "+nodeCfg.Node.Name+"\n"+token.Error().Error(), 1)
|
|
|
+ ok = false
|
|
|
+ }
|
|
|
+ if ok {
|
|
|
+ ncutils.PrintLog("successfully unsubscribed node "+nodeCfg.Node.ID+" : "+nodeCfg.Node.Name, 0)
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+// sets up Message Queue and subscribes/publishes updates to/from server
|
|
|
+// the client should subscribe to ALL nodes that exist on unique comms network locally
|
|
|
+func messageQueue(ctx context.Context, commsNet string) {
|
|
|
+ var commsCfg config.ClientConfig
|
|
|
+ commsCfg.Network = commsNet
|
|
|
+ commsCfg.ReadConfig()
|
|
|
+ ncutils.Log("netclient daemon started for network: " + commsNet)
|
|
|
+ client := setupMQTT(&commsCfg, false)
|
|
|
+ defer client.Disconnect(250)
|
|
|
+ <-ctx.Done()
|
|
|
+ ncutils.Log("shutting down daemon for comms network " + commsNet)
|
|
|
+}
|
|
|
+
|
|
|
+// setupMQTT creates a connection to broker and return client
|
|
|
+// utilizes comms client configs to set up connections
|
|
|
+func setupMQTT(commsCfg *config.ClientConfig, publish bool) mqtt.Client {
|
|
|
opts := mqtt.NewClientOptions()
|
|
|
- server := getServerAddress(cfg)
|
|
|
- opts.AddBroker(server + ":1883")
|
|
|
- id := ncutils.MakeRandomString(23)
|
|
|
- opts.ClientID = id
|
|
|
+ server := getServerAddress(commsCfg)
|
|
|
+ opts.AddBroker(server + ":1883") // TODO get the appropriate port of the comms mq server
|
|
|
+ opts.ClientID = ncutils.MakeRandomString(23) // helps avoid id duplication on broker
|
|
|
opts.SetDefaultPublishHandler(All)
|
|
|
opts.SetAutoReconnect(true)
|
|
|
opts.SetConnectRetry(true)
|
|
@@ -103,43 +194,26 @@ func SetupMQTT(cfg *config.ClientConfig, publish bool) mqtt.Client {
|
|
|
opts.SetWriteTimeout(time.Minute)
|
|
|
opts.SetOnConnectHandler(func(client mqtt.Client) {
|
|
|
if !publish {
|
|
|
- if cfg.DebugOn {
|
|
|
- if token := client.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil {
|
|
|
- ncutils.Log(token.Error().Error())
|
|
|
- return
|
|
|
- }
|
|
|
- ncutils.Log("subscribed to all topics for debugging purposes")
|
|
|
- }
|
|
|
- if token := client.Subscribe(fmt.Sprintf("update/%s/%s", cfg.Node.Network, cfg.Node.ID), 0, mqtt.MessageHandler(NodeUpdate)); token.Wait() && token.Error() != nil {
|
|
|
- ncutils.Log(token.Error().Error())
|
|
|
- return
|
|
|
- }
|
|
|
- if cfg.DebugOn {
|
|
|
- ncutils.Log(fmt.Sprintf("subscribed to node updates for node %s update/%s/%s", cfg.Node.Name, cfg.Node.Network, cfg.Node.ID))
|
|
|
- }
|
|
|
- if token := client.Subscribe(fmt.Sprintf("peers/%s/%s", cfg.Node.Network, cfg.Node.ID), 0, mqtt.MessageHandler(UpdatePeers)); token.Wait() && token.Error() != nil {
|
|
|
- ncutils.Log(token.Error().Error())
|
|
|
- return
|
|
|
+ networks, err := ncutils.GetSystemNetworks()
|
|
|
+ if err != nil {
|
|
|
+ ncutils.Log("error retriving networks " + err.Error())
|
|
|
}
|
|
|
- if cfg.DebugOn {
|
|
|
- ncutils.Log(fmt.Sprintf("subscribed to peer updates for node %s peers/%s/%s", cfg.Node.Name, cfg.Node.Network, cfg.Node.ID))
|
|
|
+ for _, network := range networks {
|
|
|
+ var currNodeCfg config.ClientConfig
|
|
|
+ currNodeCfg.Network = network
|
|
|
+ currNodeCfg.ReadConfig()
|
|
|
+ setSubscriptions(client, &currNodeCfg)
|
|
|
}
|
|
|
- opts.SetOrderMatters(true)
|
|
|
- opts.SetResumeSubs(true)
|
|
|
}
|
|
|
})
|
|
|
+ opts.SetOrderMatters(true)
|
|
|
+ opts.SetResumeSubs(true)
|
|
|
opts.SetConnectionLostHandler(func(c mqtt.Client, e error) {
|
|
|
- ncutils.Log("detected broker connection lost, running pull for " + cfg.Node.Network)
|
|
|
- _, err := Pull(cfg.Node.Network, true)
|
|
|
+ ncutils.Log("detected broker connection lost, running pull for " + commsCfg.Node.Network)
|
|
|
+ _, err := Pull(commsCfg.Node.Network, true)
|
|
|
if err != nil {
|
|
|
ncutils.Log("could not run pull, server unreachable: " + err.Error())
|
|
|
ncutils.Log("waiting to retry...")
|
|
|
- /*
|
|
|
- //Consider putting in logic to restart - daemon may take long time to refresh
|
|
|
- time.Sleep(time.Minute * 5)
|
|
|
- ncutils.Log("restarting netclient")
|
|
|
- daemon.Restart()
|
|
|
- */
|
|
|
}
|
|
|
ncutils.Log("connection re-established with mqtt server")
|
|
|
})
|
|
@@ -149,10 +223,10 @@ func SetupMQTT(cfg *config.ClientConfig, publish bool) mqtt.Client {
|
|
|
for {
|
|
|
//if after 12 seconds, try a gRPC pull on the last try
|
|
|
if time.Now().After(tperiod) {
|
|
|
- ncutils.Log("running pull for " + cfg.Node.Network)
|
|
|
- _, err := Pull(cfg.Node.Network, true)
|
|
|
+ ncutils.Log("running pull for " + commsCfg.Node.Network)
|
|
|
+ _, err := Pull(commsCfg.Node.Network, true)
|
|
|
if err != nil {
|
|
|
- ncutils.Log("could not run pull, exiting " + cfg.Node.Network + " setup: " + err.Error())
|
|
|
+ ncutils.Log("could not run pull, exiting " + commsCfg.Node.Network + " setup: " + err.Error())
|
|
|
return client
|
|
|
}
|
|
|
time.Sleep(time.Second)
|
|
@@ -160,10 +234,10 @@ func SetupMQTT(cfg *config.ClientConfig, publish bool) mqtt.Client {
|
|
|
if token := client.Connect(); token.Wait() && token.Error() != nil {
|
|
|
ncutils.Log("unable to connect to broker, retrying ...")
|
|
|
if time.Now().After(tperiod) {
|
|
|
- ncutils.Log("could not connect to broker, exiting " + cfg.Node.Network + " setup: " + token.Error().Error())
|
|
|
+ ncutils.Log("could not connect to broker, exiting " + commsCfg.Node.Network + " setup: " + token.Error().Error())
|
|
|
if strings.Contains(token.Error().Error(), "connectex") || strings.Contains(token.Error().Error(), "i/o timeout") {
|
|
|
ncutils.PrintLog("connection issue detected.. pulling and restarting daemon", 0)
|
|
|
- Pull(cfg.Node.Network, true)
|
|
|
+ Pull(commsCfg.Node.Network, true)
|
|
|
daemon.Restart()
|
|
|
}
|
|
|
return client
|
|
@@ -176,341 +250,14 @@ func SetupMQTT(cfg *config.ClientConfig, publish bool) mqtt.Client {
|
|
|
return client
|
|
|
}
|
|
|
|
|
|
-// MessageQueue sets up Message Queue and subsribes/publishes updates to/from server
|
|
|
-func MessageQueue(ctx context.Context, network string) {
|
|
|
- ncutils.Log("netclient go routine started for " + network)
|
|
|
- var cfg config.ClientConfig
|
|
|
- cfg.Network = network
|
|
|
- initialPull(cfg.Network)
|
|
|
-
|
|
|
- cfg.ReadConfig()
|
|
|
- ncutils.Log("daemon started for network: " + network)
|
|
|
- client := SetupMQTT(&cfg, false)
|
|
|
-
|
|
|
- defer client.Disconnect(250)
|
|
|
- wg := &sync.WaitGroup{}
|
|
|
- wg.Add(2)
|
|
|
- checkinctx, checkincancel := context.WithCancel(context.Background())
|
|
|
- go Checkin(checkinctx, wg, &cfg, network)
|
|
|
- <-ctx.Done()
|
|
|
- checkincancel()
|
|
|
- ncutils.Log("shutting down message queue for network " + network)
|
|
|
- wg.Wait()
|
|
|
- ncutils.Log("shutdown complete")
|
|
|
-}
|
|
|
-
|
|
|
-// All -- mqtt message hander for all ('#') topics
|
|
|
-var All mqtt.MessageHandler = func(client mqtt.Client, msg mqtt.Message) {
|
|
|
- ncutils.Log("default message handler -- received message but not handling")
|
|
|
- ncutils.Log("Topic: " + string(msg.Topic()))
|
|
|
- //ncutils.Log("Message: " + string(msg.Payload()))
|
|
|
-}
|
|
|
-
|
|
|
-// NodeUpdate -- mqtt message handler for /update/<NodeID> topic
|
|
|
-func NodeUpdate(client mqtt.Client, msg mqtt.Message) {
|
|
|
- var newNode models.Node
|
|
|
- var cfg config.ClientConfig
|
|
|
- var network = parseNetworkFromTopic(msg.Topic())
|
|
|
- cfg.Network = network
|
|
|
- cfg.ReadConfig()
|
|
|
-
|
|
|
- data, dataErr := decryptMsg(&cfg, msg.Payload())
|
|
|
- if dataErr != nil {
|
|
|
- return
|
|
|
- }
|
|
|
- err := json.Unmarshal([]byte(data), &newNode)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error unmarshalling node update data" + err.Error())
|
|
|
- return
|
|
|
- }
|
|
|
-
|
|
|
- ncutils.Log("received message to update node " + newNode.Name)
|
|
|
- // see if cache hit, if so skip
|
|
|
- var currentMessage = read(newNode.Network, lastNodeUpdate)
|
|
|
- if currentMessage == string(data) {
|
|
|
- return
|
|
|
- }
|
|
|
- insert(newNode.Network, lastNodeUpdate, string(data)) // store new message in cache
|
|
|
-
|
|
|
- //check if interface name has changed if so delete.
|
|
|
- if cfg.Node.Interface != newNode.Interface {
|
|
|
- if err = wireguard.RemoveConf(cfg.Node.Interface, true); err != nil {
|
|
|
- ncutils.PrintLog("could not delete old interface "+cfg.Node.Interface+": "+err.Error(), 1)
|
|
|
- }
|
|
|
- }
|
|
|
- newNode.PullChanges = "no"
|
|
|
- //ensure that OS never changes
|
|
|
- newNode.OS = runtime.GOOS
|
|
|
- // check if interface needs to delta
|
|
|
- ifaceDelta := ncutils.IfaceDelta(&cfg.Node, &newNode)
|
|
|
- shouldDNSChange := cfg.Node.DNSOn != newNode.DNSOn
|
|
|
-
|
|
|
- cfg.Node = newNode
|
|
|
- switch newNode.Action {
|
|
|
- case models.NODE_DELETE:
|
|
|
- if cancel, ok := networkcontext.Load(newNode.Network); ok {
|
|
|
- ncutils.Log("cancelling message queue context for " + newNode.Network)
|
|
|
- cancel.(context.CancelFunc)()
|
|
|
- } else {
|
|
|
- ncutils.Log("failed to kill go routines for network " + newNode.Network)
|
|
|
- }
|
|
|
- ncutils.PrintLog(fmt.Sprintf("received delete request for %s", cfg.Node.Name), 1)
|
|
|
- if err = LeaveNetwork(cfg.Node.Network); err != nil {
|
|
|
- if !strings.Contains("rpc error", err.Error()) {
|
|
|
- ncutils.PrintLog(fmt.Sprintf("failed to leave, please check that local files for network %s were removed", cfg.Node.Network), 1)
|
|
|
- }
|
|
|
- ncutils.PrintLog(fmt.Sprintf("%s was removed", cfg.Node.Name), 1)
|
|
|
- return
|
|
|
- }
|
|
|
- ncutils.PrintLog(fmt.Sprintf("%s was removed", cfg.Node.Name), 1)
|
|
|
- return
|
|
|
- case models.NODE_UPDATE_KEY:
|
|
|
- if err := UpdateKeys(&cfg, client); err != nil {
|
|
|
- ncutils.PrintLog("err updating wireguard keys: "+err.Error(), 1)
|
|
|
- }
|
|
|
- case models.NODE_NOOP:
|
|
|
- default:
|
|
|
- }
|
|
|
- // Save new config
|
|
|
- cfg.Node.Action = models.NODE_NOOP
|
|
|
- if err := config.Write(&cfg, cfg.Network); err != nil {
|
|
|
- ncutils.PrintLog("error updating node configuration: "+err.Error(), 1)
|
|
|
- }
|
|
|
- nameserver := cfg.Server.CoreDNSAddr
|
|
|
- privateKey, err := wireguard.RetrievePrivKey(newNode.Network)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error reading PrivateKey " + err.Error())
|
|
|
- return
|
|
|
- }
|
|
|
- file := ncutils.GetNetclientPathSpecific() + cfg.Node.Interface + ".conf"
|
|
|
- if err := wireguard.UpdateWgInterface(file, privateKey, nameserver, newNode); err != nil {
|
|
|
- ncutils.Log("error updating wireguard config " + err.Error())
|
|
|
- return
|
|
|
- }
|
|
|
- if ifaceDelta {
|
|
|
- ncutils.Log("applying WG conf to " + file)
|
|
|
- err = wireguard.ApplyConf(&cfg.Node, cfg.Node.Interface, file)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error restarting wg after node update " + err.Error())
|
|
|
- return
|
|
|
- }
|
|
|
- time.Sleep(time.Second >> 1)
|
|
|
- // if err = Resubscribe(client, &cfg); err != nil {
|
|
|
- // ncutils.Log("error resubscribing after interface change " + err.Error())
|
|
|
- // return
|
|
|
- // }
|
|
|
- if newNode.DNSOn == "yes" {
|
|
|
- for _, server := range newNode.NetworkSettings.DefaultServerAddrs {
|
|
|
- if server.IsLeader {
|
|
|
- go local.SetDNSWithRetry(newNode, server.Address)
|
|
|
- break
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- //deal with DNS
|
|
|
- if newNode.DNSOn != "yes" && shouldDNSChange && cfg.Node.Interface != "" {
|
|
|
- ncutils.Log("settng DNS off")
|
|
|
- _, err := ncutils.RunCmd("/usr/bin/resolvectl revert "+cfg.Node.Interface, true)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error applying dns" + err.Error())
|
|
|
- }
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-// UpdatePeers -- mqtt message handler for peers/<Network>/<NodeID> topic
|
|
|
-func UpdatePeers(client mqtt.Client, msg mqtt.Message) {
|
|
|
- var peerUpdate models.PeerUpdate
|
|
|
- var network = parseNetworkFromTopic(msg.Topic())
|
|
|
- var cfg = config.ClientConfig{}
|
|
|
- cfg.Network = network
|
|
|
- cfg.ReadConfig()
|
|
|
-
|
|
|
- data, dataErr := decryptMsg(&cfg, msg.Payload())
|
|
|
- if dataErr != nil {
|
|
|
- return
|
|
|
- }
|
|
|
- err := json.Unmarshal([]byte(data), &peerUpdate)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error unmarshalling peer data")
|
|
|
- return
|
|
|
- }
|
|
|
- // see if cached hit, if so skip
|
|
|
- var currentMessage = read(peerUpdate.Network, lastPeerUpdate)
|
|
|
- if currentMessage == string(data) {
|
|
|
- return
|
|
|
- }
|
|
|
- insert(peerUpdate.Network, lastPeerUpdate, string(data))
|
|
|
-
|
|
|
- file := ncutils.GetNetclientPathSpecific() + cfg.Node.Interface + ".conf"
|
|
|
- err = wireguard.UpdateWgPeers(file, peerUpdate.Peers)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error updating wireguard peers" + err.Error())
|
|
|
- return
|
|
|
- }
|
|
|
- //err = wireguard.SyncWGQuickConf(cfg.Node.Interface, file)
|
|
|
- var iface = cfg.Node.Interface
|
|
|
- if ncutils.IsMac() {
|
|
|
- iface, err = local.GetMacIface(cfg.Node.Address)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error retrieving mac iface: " + err.Error())
|
|
|
- return
|
|
|
- }
|
|
|
- }
|
|
|
- err = wireguard.SetPeers(iface, cfg.Node.Address, cfg.Node.PersistentKeepalive, peerUpdate.Peers)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error syncing wg after peer update: " + err.Error())
|
|
|
- return
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-// MonitorKeepalive - checks time last server keepalive received. If more than 3+ minutes, notify and resubscribe
|
|
|
-// func MonitorKeepalive(ctx context.Context, wg *sync.WaitGroup, client mqtt.Client, cfg *config.ClientConfig) {
|
|
|
-// defer wg.Done()
|
|
|
-// for {
|
|
|
-// select {
|
|
|
-// case <-ctx.Done():
|
|
|
-// ncutils.Log("cancel recieved, monitor keepalive exiting")
|
|
|
-// return
|
|
|
-// case <-time.After(time.Second * 150):
|
|
|
-// var keepalivetime time.Time
|
|
|
-// keepaliveval, ok := keepalive.Load(cfg.Node.Network)
|
|
|
-// if ok {
|
|
|
-// keepalivetime = keepaliveval.(time.Time)
|
|
|
-// if !keepalivetime.IsZero() && time.Since(keepalivetime) > time.Second*120 { // more than 2+ minutes
|
|
|
-// // ncutils.Log("server keepalive not recieved recently, resubscribe to message queue")
|
|
|
-// // err := Resubscribe(client, cfg)
|
|
|
-// // if err != nil {
|
|
|
-// // ncutils.Log("closing " + err.Error())
|
|
|
-// // }
|
|
|
-// ncutils.Log("maybe wanna call something")
|
|
|
-// }
|
|
|
-// }
|
|
|
-// }
|
|
|
-// }
|
|
|
-// }
|
|
|
-
|
|
|
-// ServerKeepAlive -- handler to react to keepalive messages published by server
|
|
|
-// func ServerKeepAlive(client mqtt.Client, msg mqtt.Message) {
|
|
|
-// var currentTime = time.Now()
|
|
|
-// keepalive.Store(parseNetworkFromTopic(msg.Topic()), currentTime)
|
|
|
-// ncutils.PrintLog("received server keepalive at "+currentTime.String(), 2)
|
|
|
-// }
|
|
|
-
|
|
|
-// UpdateKeys -- updates private key and returns new publickey
|
|
|
-func UpdateKeys(cfg *config.ClientConfig, client mqtt.Client) error {
|
|
|
- ncutils.Log("received message to update keys")
|
|
|
- key, err := wgtypes.GeneratePrivateKey()
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("error generating privatekey " + err.Error())
|
|
|
- return err
|
|
|
- }
|
|
|
- file := ncutils.GetNetclientPathSpecific() + cfg.Node.Interface + ".conf"
|
|
|
- if err := wireguard.UpdatePrivateKey(file, key.String()); err != nil {
|
|
|
- ncutils.Log("error updating wireguard key " + err.Error())
|
|
|
- return err
|
|
|
- }
|
|
|
- cfg.Node.PublicKey = key.PublicKey().String()
|
|
|
- if err := config.ModConfig(&cfg.Node); err != nil {
|
|
|
- ncutils.Log("error updating local config " + err.Error())
|
|
|
- }
|
|
|
- PublishNodeUpdate(cfg)
|
|
|
- if err = wireguard.ApplyConf(&cfg.Node, cfg.Node.Interface, file); err != nil {
|
|
|
- ncutils.Log("error applying new config " + err.Error())
|
|
|
- return err
|
|
|
- }
|
|
|
- return nil
|
|
|
-}
|
|
|
-
|
|
|
-// Checkin -- go routine that checks for public or local ip changes, publishes changes
|
|
|
-// if there are no updates, simply "pings" the server as a checkin
|
|
|
-func Checkin(ctx context.Context, wg *sync.WaitGroup, cfg *config.ClientConfig, network string) {
|
|
|
- defer wg.Done()
|
|
|
- for {
|
|
|
- select {
|
|
|
- case <-ctx.Done():
|
|
|
- ncutils.Log("Checkin cancelled")
|
|
|
- return
|
|
|
- //delay should be configuraable -> use cfg.Node.NetworkSettings.DefaultCheckInInterval ??
|
|
|
- case <-time.After(time.Second * 60):
|
|
|
- // ncutils.Log("Checkin running")
|
|
|
- //read latest config
|
|
|
- cfg.ReadConfig()
|
|
|
- if cfg.Node.Roaming == "yes" && cfg.Node.IsStatic != "yes" {
|
|
|
- extIP, err := ncutils.GetPublicIP()
|
|
|
- if err != nil {
|
|
|
- ncutils.PrintLog("error encountered checking ip addresses: "+err.Error(), 1)
|
|
|
- }
|
|
|
- if cfg.Node.Endpoint != extIP && extIP != "" {
|
|
|
- ncutils.PrintLog("endpoint has changed from "+cfg.Node.Endpoint+" to "+extIP, 1)
|
|
|
- cfg.Node.Endpoint = extIP
|
|
|
- if err := PublishNodeUpdate(cfg); err != nil {
|
|
|
- ncutils.Log("could not publish endpoint change")
|
|
|
- }
|
|
|
- }
|
|
|
- intIP, err := getPrivateAddr()
|
|
|
- if err != nil {
|
|
|
- ncutils.PrintLog("error encountered checking ip addresses: "+err.Error(), 1)
|
|
|
- }
|
|
|
- if cfg.Node.LocalAddress != intIP && intIP != "" {
|
|
|
- ncutils.PrintLog("local Address has changed from "+cfg.Node.LocalAddress+" to "+intIP, 1)
|
|
|
- cfg.Node.LocalAddress = intIP
|
|
|
- if err := PublishNodeUpdate(cfg); err != nil {
|
|
|
- ncutils.Log("could not publish local address change")
|
|
|
- }
|
|
|
- }
|
|
|
- } else {
|
|
|
- localIP, err := ncutils.GetLocalIP(cfg.Node.LocalRange)
|
|
|
- if err != nil {
|
|
|
- ncutils.PrintLog("error encountered checking ip addresses: "+err.Error(), 1)
|
|
|
- }
|
|
|
- if cfg.Node.Endpoint != localIP && localIP != "" {
|
|
|
- ncutils.PrintLog("endpoint has changed from "+cfg.Node.Endpoint+" to "+localIP, 1)
|
|
|
- cfg.Node.Endpoint = localIP
|
|
|
- if err := PublishNodeUpdate(cfg); err != nil {
|
|
|
- ncutils.Log("could not publish localip change")
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- if err := pingServer(cfg); err != nil {
|
|
|
- ncutils.PrintLog("could not ping server "+err.Error(), 0)
|
|
|
- }
|
|
|
- Hello(cfg, network)
|
|
|
- // ncutils.Log("Checkin complete")
|
|
|
- }
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-// PublishNodeUpdates -- saves node and pushes changes to broker
|
|
|
-func PublishNodeUpdate(cfg *config.ClientConfig) error {
|
|
|
- if err := config.Write(cfg, cfg.Network); err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
- data, err := json.Marshal(cfg.Node)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
- if err = publish(cfg, fmt.Sprintf("update/%s", cfg.Node.ID), data); err != nil {
|
|
|
+// publishes a message to server to update peers on this peer's behalf
|
|
|
+func publishSignal(commsCfg, nodeCfg *config.ClientConfig, signal byte) error {
|
|
|
+ if err := publish(commsCfg, nodeCfg, fmt.Sprintf("signal/%s", nodeCfg.Node.ID), []byte{signal}, 1); err != nil {
|
|
|
return err
|
|
|
}
|
|
|
return nil
|
|
|
}
|
|
|
|
|
|
-// Hello -- ping the broker to let server know node is alive and doing fine
|
|
|
-func Hello(cfg *config.ClientConfig, network string) {
|
|
|
- if err := publish(cfg, fmt.Sprintf("ping/%s", cfg.Node.ID), []byte(ncutils.Version)); err != nil {
|
|
|
- ncutils.Log(fmt.Sprintf("error publishing ping, %v", err))
|
|
|
- ncutils.Log("running pull on " + cfg.Node.Network + " to reconnect")
|
|
|
- _, err := Pull(cfg.Node.Network, true)
|
|
|
- if err != nil {
|
|
|
- ncutils.Log("could not run pull on " + cfg.Node.Network + ", error: " + err.Error())
|
|
|
- }
|
|
|
-
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-// == Private ==
|
|
|
-
|
|
|
func initialPull(network string) {
|
|
|
ncutils.Log("pulling latest config for " + network)
|
|
|
var configPath = fmt.Sprintf("%snetconfig-%s", ncutils.GetNetclientPathSpecific(), network)
|
|
@@ -539,67 +286,28 @@ func initialPull(network string) {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-func publish(cfg *config.ClientConfig, dest string, msg []byte) error {
|
|
|
- // setup the keys
|
|
|
- trafficPrivKey, err := auth.RetrieveTrafficKey(cfg.Node.Network)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
-
|
|
|
- serverPubKey, err := ncutils.ConvertBytesToKey(cfg.Node.TrafficKeys.Server)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
-
|
|
|
- client := SetupMQTT(cfg, true)
|
|
|
- defer client.Disconnect(250)
|
|
|
- encrypted, err := ncutils.BoxEncrypt(msg, serverPubKey, trafficPrivKey)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
-
|
|
|
- if token := client.Publish(dest, 0, false, encrypted); token.Wait() && token.Error() != nil {
|
|
|
- return token.Error()
|
|
|
- }
|
|
|
- return nil
|
|
|
-}
|
|
|
-
|
|
|
func parseNetworkFromTopic(topic string) string {
|
|
|
return strings.Split(topic, "/")[1]
|
|
|
}
|
|
|
|
|
|
-func decryptMsg(cfg *config.ClientConfig, msg []byte) ([]byte, error) {
|
|
|
+// should only ever use node client configs
|
|
|
+func decryptMsg(nodeCfg *config.ClientConfig, msg []byte) ([]byte, error) {
|
|
|
if len(msg) <= 24 { // make sure message is of appropriate length
|
|
|
- return nil, fmt.Errorf("recieved invalid message from broker %s", string(msg))
|
|
|
+ return nil, fmt.Errorf("recieved invalid message from broker %v", msg)
|
|
|
}
|
|
|
|
|
|
// setup the keys
|
|
|
- diskKey, keyErr := auth.RetrieveTrafficKey(cfg.Node.Network)
|
|
|
+ diskKey, keyErr := auth.RetrieveTrafficKey(nodeCfg.Node.Network)
|
|
|
if keyErr != nil {
|
|
|
return nil, keyErr
|
|
|
}
|
|
|
|
|
|
- serverPubKey, err := ncutils.ConvertBytesToKey(cfg.Node.TrafficKeys.Server)
|
|
|
+ serverPubKey, err := ncutils.ConvertBytesToKey(nodeCfg.Node.TrafficKeys.Server)
|
|
|
if err != nil {
|
|
|
return nil, err
|
|
|
}
|
|
|
|
|
|
- return ncutils.BoxDecrypt(msg, serverPubKey, diskKey)
|
|
|
-}
|
|
|
-
|
|
|
-func pingServer(cfg *config.ClientConfig) error {
|
|
|
- node := getServerAddress(cfg)
|
|
|
- pinger, err := ping.NewPinger(node)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
- pinger.Timeout = 2 * time.Second
|
|
|
- pinger.Run()
|
|
|
- stats := pinger.Statistics()
|
|
|
- if stats.PacketLoss == 100 {
|
|
|
- return errors.New("ping error")
|
|
|
- }
|
|
|
- return nil
|
|
|
+ return ncutils.DeChunk(msg, serverPubKey, diskKey)
|
|
|
}
|
|
|
|
|
|
func getServerAddress(cfg *config.ClientConfig) string {
|
|
@@ -611,3 +319,49 @@ func getServerAddress(cfg *config.ClientConfig) string {
|
|
|
}
|
|
|
return server.Address
|
|
|
}
|
|
|
+
|
|
|
+func getCommsNetworks(networks []string) (map[string]bool, error) {
|
|
|
+ var cfg config.ClientConfig
|
|
|
+ var response = make(map[string]bool, 1)
|
|
|
+ for _, network := range networks {
|
|
|
+ cfg.Network = network
|
|
|
+ cfg.ReadConfig()
|
|
|
+ response[cfg.Node.CommID] = true
|
|
|
+ }
|
|
|
+ return response, nil
|
|
|
+}
|
|
|
+
|
|
|
+func getCommsCfgByNode(node *models.Node) config.ClientConfig {
|
|
|
+ var commsCfg config.ClientConfig
|
|
|
+ commsCfg.Network = node.Network
|
|
|
+ commsCfg.ReadConfig()
|
|
|
+ return commsCfg
|
|
|
+}
|
|
|
+
|
|
|
+// == Message Caches ==
|
|
|
+
|
|
|
+func insert(network, which, cache string) {
|
|
|
+ var newMessage = cachedMessage{
|
|
|
+ Message: cache,
|
|
|
+ LastSeen: time.Now(),
|
|
|
+ }
|
|
|
+ messageCache.Store(fmt.Sprintf("%s%s", network, which), newMessage)
|
|
|
+}
|
|
|
+
|
|
|
+func read(network, which string) string {
|
|
|
+ val, isok := messageCache.Load(fmt.Sprintf("%s%s", network, which))
|
|
|
+ if isok {
|
|
|
+ var readMessage = val.(cachedMessage) // fetch current cached message
|
|
|
+ if readMessage.LastSeen.IsZero() {
|
|
|
+ return ""
|
|
|
+ }
|
|
|
+	if time.Now().After(readMessage.LastSeen.Add(time.Minute * 10)) { // check if message has been there over 10 minutes
|
|
|
+ messageCache.Delete(fmt.Sprintf("%s%s", network, which)) // remove old message if expired
|
|
|
+ return ""
|
|
|
+ }
|
|
|
+ return readMessage.Message // return current message if not expired
|
|
|
+ }
|
|
|
+ return ""
|
|
|
+}
|
|
|
+
|
|
|
+// == End Message Caches ==
|