Browse Source

Merge branch 'develop' into NET-1991

# Conflicts:
#	pro/auth/sync.go
#	pro/initialize.go
#	pro/logic/user_mgmt.go
Vishal Dalwadi 3 months ago
parent
commit
d75f96037c

+ 33 - 29
auth/host_session.go

@@ -3,6 +3,8 @@ package auth
 import (
 	"encoding/json"
 	"fmt"
+	"log/slog"
+	"strings"
 	"time"
 
 	"github.com/google/uuid"
@@ -14,7 +16,6 @@ import (
 	"github.com/gravitl/netmaker/models"
 	"github.com/gravitl/netmaker/mq"
 	"github.com/gravitl/netmaker/servercfg"
-	"golang.org/x/exp/slog"
 )
 
 // SessionHandler - called by the HTTP router when user
@@ -242,37 +243,40 @@ func CheckNetRegAndHostUpdate(networks []string, h *models.Host, relayNodeId uui
 		network := networks[i]
 		if ok, _ := logic.NetworkExists(network); ok {
 			newNode, err := logic.UpdateHostNetwork(h, network, true)
-			if err != nil {
-				logger.Log(0, "failed to add host to network:", h.ID.String(), h.Name, network, err.Error())
-				continue
-			}
-			if len(tags) > 0 {
-				newNode.Tags = make(map[models.TagID]struct{})
-				for _, tagI := range tags {
-					newNode.Tags[tagI] = struct{}{}
-				}
-				logic.UpsertNode(newNode)
-			}
-
-			if relayNodeId != uuid.Nil && !newNode.IsRelayed {
-				// check if relay node exists and acting as relay
-				relaynode, err := logic.GetNodeByID(relayNodeId.String())
-				if err == nil && relaynode.IsRelay && relaynode.Network == newNode.Network {
-					slog.Info(fmt.Sprintf("adding relayed node %s to relay %s on network %s", newNode.ID.String(), relayNodeId.String(), network))
-					newNode.IsRelayed = true
-					newNode.RelayedBy = relayNodeId.String()
-					updatedRelayNode := relaynode
-					updatedRelayNode.RelayedNodes = append(updatedRelayNode.RelayedNodes, newNode.ID.String())
-					logic.UpdateRelayed(&relaynode, &updatedRelayNode)
-					if err := logic.UpsertNode(&updatedRelayNode); err != nil {
-						slog.Error("failed to update node", "nodeid", relayNodeId.String())
+			if err == nil || strings.Contains(err.Error(), "host already part of network") {
+				if len(tags) > 0 {
+					newNode.Tags = make(map[models.TagID]struct{})
+					for _, tagI := range tags {
+						newNode.Tags[tagI] = struct{}{}
 					}
-					if err := logic.UpsertNode(newNode); err != nil {
-						slog.Error("failed to update node", "nodeid", relayNodeId.String())
+					logic.UpsertNode(newNode)
+				}
+				if relayNodeId != uuid.Nil && !newNode.IsRelayed {
+					// check if relay node exists and acting as relay
+					relaynode, err := logic.GetNodeByID(relayNodeId.String())
+					if err == nil && relaynode.IsGw && relaynode.Network == newNode.Network {
+						slog.Error(fmt.Sprintf("adding relayed node %s to relay %s on network %s", newNode.ID.String(), relayNodeId.String(), network))
+						newNode.IsRelayed = true
+						newNode.RelayedBy = relayNodeId.String()
+						updatedRelayNode := relaynode
+						updatedRelayNode.RelayedNodes = append(updatedRelayNode.RelayedNodes, newNode.ID.String())
+						logic.UpdateRelayed(&relaynode, &updatedRelayNode)
+						if err := logic.UpsertNode(&updatedRelayNode); err != nil {
+							slog.Error("failed to update node", "nodeid", relayNodeId.String())
+						}
+						if err := logic.UpsertNode(newNode); err != nil {
+							slog.Error("failed to update node", "nodeid", relayNodeId.String())
+						}
+					} else {
+						slog.Error("failed to relay node. maybe specified relay node is actually not a relay? Or the relayed node is not in the same network with relay?", "err", err)
 					}
-				} else {
-					slog.Error("failed to relay node. maybe specified relay node is actually not a relay? Or the relayed node is not in the same network with relay?", "err", err)
 				}
+				if strings.Contains(err.Error(), "host already part of network") {
+					continue
+				}
+			} else {
+				logger.Log(0, "failed to add host to network:", h.ID.String(), h.Name, network, err.Error())
+				continue
 			}
 			logger.Log(1, "added new node", newNode.ID.String(), "to host", h.Name)
 			hostactions.AddAction(models.HostUpdate{

+ 7 - 2
controllers/egress.go

@@ -269,9 +269,14 @@ func deleteEgress(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	e := schema.Egress{ID: id}
-	err := e.Delete(db.WithContext(r.Context()))
+	err := e.Get(db.WithContext(r.Context()))
 	if err != nil {
-		logic.ReturnErrorResponse(w, r, logic.FormatError(err, "internal"))
+		logic.ReturnErrorResponse(w, r, logic.FormatError(err, logic.BadReq))
+		return
+	}
+	err = e.Delete(db.WithContext(r.Context()))
+	if err != nil {
+		logic.ReturnErrorResponse(w, r, logic.FormatError(err, logic.Internal))
 		return
 	}
 	logic.LogEvent(&models.Event{

+ 8 - 8
controllers/enrollmentkeys.go

@@ -380,14 +380,14 @@ func handleHostRegister(w http.ResponseWriter, r *http.Request) {
 	} else {
 		// need to revise the list of networks from key
 		// based on the ones host currently has
-		networksToAdd := []string{}
-		currentNets := logic.GetHostNetworks(newHost.ID.String())
-		for _, newNet := range enrollmentKey.Networks {
-			if !logic.StringSliceContains(currentNets, newNet) {
-				networksToAdd = append(networksToAdd, newNet)
-			}
-		}
-		enrollmentKey.Networks = networksToAdd
+		// networksToAdd := []string{}
+		// currentNets := logic.GetHostNetworks(newHost.ID.String())
+		// for _, newNet := range enrollmentKey.Networks {
+		// 	if !logic.StringSliceContains(currentNets, newNet) {
+		// 		networksToAdd = append(networksToAdd, newNet)
+		// 	}
+		// }
+		// enrollmentKey.Networks = networksToAdd
 		currHost, err := logic.GetHost(newHost.ID.String())
 		if err != nil {
 			slog.Error("failed registration", "hostID", newHost.ID.String(), "hostName", newHost.Name, "error", err.Error())

+ 60 - 2
controllers/hosts.go

@@ -96,7 +96,21 @@ func upgradeHosts(w http.ResponseWriter, r *http.Request) {
 			}(host)
 		}
 	}()
-
+	logic.LogEvent(&models.Event{
+		Action: models.UpgradeAll,
+		Source: models.Subject{
+			ID:   r.Header.Get("user"),
+			Name: r.Header.Get("user"),
+			Type: models.UserSub,
+		},
+		TriggeredBy: r.Header.Get("user"),
+		Target: models.Subject{
+			ID:   "All Hosts",
+			Name: "All Hosts",
+			Type: models.DeviceSub,
+		},
+		Origin: models.Dashboard,
+	})
 	slog.Info("upgrade all hosts request received", "user", user)
 	logic.ReturnSuccessResponse(w, r, "upgrade all hosts request received")
 }
@@ -892,6 +906,21 @@ func updateAllKeys(w http.ResponseWriter, r *http.Request) {
 			}
 		}
 	}()
+	logic.LogEvent(&models.Event{
+		Action: models.RefreshAllKeys,
+		Source: models.Subject{
+			ID:   r.Header.Get("user"),
+			Name: r.Header.Get("user"),
+			Type: models.UserSub,
+		},
+		TriggeredBy: r.Header.Get("user"),
+		Target: models.Subject{
+			ID:   "All Devices",
+			Name: "All Devices",
+			Type: models.DeviceSub,
+		},
+		Origin: models.Dashboard,
+	})
 	logger.Log(2, r.Header.Get("user"), "updated keys for all hosts")
 	w.WriteHeader(http.StatusOK)
 }
@@ -927,6 +956,21 @@ func updateKeys(w http.ResponseWriter, r *http.Request) {
 			logger.Log(0, "failed to send host key update", host.ID.String(), err.Error())
 		}
 	}()
+	logic.LogEvent(&models.Event{
+		Action: models.RefreshKey,
+		Source: models.Subject{
+			ID:   r.Header.Get("user"),
+			Name: r.Header.Get("user"),
+			Type: models.UserSub,
+		},
+		TriggeredBy: r.Header.Get("user"),
+		Target: models.Subject{
+			ID:   host.ID.String(),
+			Name: host.Name,
+			Type: models.DeviceSub,
+		},
+		Origin: models.Dashboard,
+	})
 	logger.Log(2, r.Header.Get("user"), "updated key on host", host.Name)
 	w.WriteHeader(http.StatusOK)
 }
@@ -965,7 +1009,21 @@ func syncHosts(w http.ResponseWriter, r *http.Request) {
 			time.Sleep(time.Millisecond * 100)
 		}
 	}()
-
+	logic.LogEvent(&models.Event{
+		Action: models.SyncAll,
+		Source: models.Subject{
+			ID:   r.Header.Get("user"),
+			Name: r.Header.Get("user"),
+			Type: models.UserSub,
+		},
+		TriggeredBy: r.Header.Get("user"),
+		Target: models.Subject{
+			ID:   "All Devices",
+			Name: "All Devices",
+			Type: models.DeviceSub,
+		},
+		Origin: models.Dashboard,
+	})
 	slog.Info("sync all hosts request received", "user", user)
 	logic.ReturnSuccessResponse(w, r, "sync all hosts request received")
 }

+ 2 - 1
controllers/user.go

@@ -900,6 +900,7 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
 	if userchange.PlatformRoleID != user.PlatformRoleID || !logic.CompareMaps(user.UserGroups, userchange.UserGroups) {
 		(&schema.UserAccessToken{UserName: user.UserName}).DeleteAllUserTokens(r.Context())
 	}
+	oldUser := *user
 	e := models.Event{
 		Action: models.Update,
 		Source: models.Subject{
@@ -914,7 +915,7 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
 			Type: models.UserSub,
 		},
 		Diff: models.Diff{
-			Old: user,
+			Old: oldUser,
 			New: userchange,
 		},
 		Origin: models.Dashboard,

+ 0 - 1
logic/acls.go

@@ -1636,7 +1636,6 @@ func GetAclRulesForNode(targetnodeI *models.Node) (rules map[string]models.AclRu
 	} else {
 		taggedNodes = GetTagMapWithNodesByNetwork(models.NetworkID(targetnode.Network), true)
 	}
-	fmt.Printf("TAGGED NODES: %+v\n", taggedNodes)
 	acls := listDevicePolicies(models.NetworkID(targetnode.Network))
 	var targetNodeTags = make(map[models.TagID]struct{})
 	if targetnode.Mutex != nil {

+ 1 - 1
logic/hosts.go

@@ -393,7 +393,7 @@ func UpdateHostNetwork(h *models.Host, network string, add bool) (*models.Node,
 			if !add {
 				return &node, nil
 			} else {
-				return nil, errors.New("host already part of network " + network)
+				return &node, errors.New("host already part of network " + network)
 			}
 		}
 	}

+ 4 - 1
logic/peers.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net"
 	"net/netip"
+	"time"
 
 	"github.com/google/uuid"
 	"github.com/gravitl/netmaker/database"
@@ -190,9 +191,11 @@ func GetPeerUpdateForHost(network string, host *models.Host, allNodes []models.N
 		if err != nil {
 			continue
 		}
-		if !node.Connected || node.PendingDelete || node.Action == models.NODE_DELETE {
+
+		if !node.Connected || node.PendingDelete || node.Action == models.NODE_DELETE || time.Since(node.LastCheckIn) > time.Hour {
 			continue
 		}
+
 		GetNodeEgressInfo(&node)
 		hostPeerUpdate = SetDefaultGw(node, hostPeerUpdate)
 		if !hostPeerUpdate.IsInternetGw {

+ 4 - 1
logic/telemetry.go

@@ -81,7 +81,8 @@ func sendTelemetry() error {
 			Set("pro_trial_end_date", d.ProTrialEndDate.In(time.UTC).Format("2006-01-02")).
 			Set("admin_email", adminEmail).
 			Set("email", adminEmail). // needed for posthog integration with hubspot. "admin_email" can only be removed if not used in posthog
-			Set("is_saas_tenant", d.IsSaasTenant),
+			Set("is_saas_tenant", d.IsSaasTenant).
+			Set("domain", d.Domain),
 	})
 }
 
@@ -105,6 +106,7 @@ func FetchTelemetryData() telemetryData {
 		data.IsProTrial = true
 	}
 	data.IsSaasTenant = servercfg.DeployedByOperator()
+	data.Domain = servercfg.GetNmBaseDomain()
 	return data
 }
 
@@ -202,6 +204,7 @@ type telemetryData struct {
 	IsProTrial      bool
 	ProTrialEndDate time.Time
 	IsSaasTenant    bool
+	Domain          string
 }
 
 // clientCount - What types of netclients we're tallying

+ 41 - 2
logic/zombie.go

@@ -7,6 +7,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/gravitl/netmaker/logger"
 	"github.com/gravitl/netmaker/models"
+	"github.com/gravitl/netmaker/servercfg"
 )
 
 const (
@@ -77,7 +78,7 @@ func checkForZombieHosts(h *models.Host) {
 func ManageZombies(ctx context.Context, peerUpdate chan *models.Node) {
 	logger.Log(2, "Zombie management started")
 	go InitializeZombies()
-	go checkPendingRemovalNodes()
+	go checkPendingRemovalNodes(peerUpdate)
 	// Zombie Nodes Cleanup Four Times a Day
 	ticker := time.NewTicker(time.Hour * ZOMBIE_TIMEOUT)
 
@@ -135,15 +136,53 @@ func ManageZombies(ctx context.Context, peerUpdate chan *models.Node) {
 					}
 				}
 			}
+			if servercfg.IsAutoCleanUpEnabled() {
+				nodes, _ := GetAllNodes()
+				for _, node := range nodes {
+					if time.Since(node.LastCheckIn) > time.Minute*ZOMBIE_DELETE_TIME {
+						if err := DeleteNode(&node, true); err != nil {
+							continue
+						}
+						node.PendingDelete = true
+						node.Action = models.NODE_DELETE
+						peerUpdate <- &node
+						host, err := GetHost(node.HostID.String())
+						if err == nil && len(host.Nodes) == 0 {
+							RemoveHostByID(host.ID.String())
+						}
+
+					}
+				}
+			}
+
 		}
 	}
 }
-func checkPendingRemovalNodes() {
+func checkPendingRemovalNodes(peerUpdate chan *models.Node) {
 	nodes, _ := GetAllNodes()
 	for _, node := range nodes {
+		node := node
 		pendingDelete := node.PendingDelete || node.Action == models.NODE_DELETE
 		if pendingDelete {
 			DeleteNode(&node, true)
+			peerUpdate <- &node
+			continue
+		}
+		if servercfg.IsAutoCleanUpEnabled() {
+			if time.Since(node.LastCheckIn) > time.Minute*ZOMBIE_DELETE_TIME {
+				if err := DeleteNode(&node, true); err != nil {
+					continue
+				}
+				node.PendingDelete = true
+				node.Action = models.NODE_DELETE
+				peerUpdate <- &node
+				host, err := GetHost(node.HostID.String())
+				if err == nil && len(host.Nodes) == 0 {
+					RemoveHostByID(host.ID.String())
+				}
+
+			}
+
 		}
 	}
 }

+ 1 - 1
main.go

@@ -189,7 +189,7 @@ func runMessageQueue(wg *sync.WaitGroup, ctx context.Context) {
 	defer mq.CloseClient()
 	go mq.Keepalive(ctx)
 	go func() {
-		peerUpdate := make(chan *models.Node)
+		peerUpdate := make(chan *models.Node, 100)
 		go logic.ManageZombies(ctx, peerUpdate)
 		go logic.DeleteExpiredNodes(ctx, peerUpdate)
 		for nodeUpdate := range peerUpdate {

+ 4 - 0
models/events.go

@@ -11,6 +11,10 @@ const (
 	LogOut            Action = "LOGOUT"
 	Connect           Action = "CONNECT"
 	Sync              Action = "SYNC"
+	RefreshKey        Action = "REFRESH_KEY"
+	RefreshAllKeys    Action = "REFRESH_ALL_KEYS"
+	SyncAll           Action = "SYNC_ALL"
+	UpgradeAll        Action = "UPGRADE_ALL"
 	Disconnect        Action = "DISCONNECT"
 	JoinHostToNet     Action = "JOIN_HOST_TO_NETWORK"
 	RemoveHostFromNet Action = "REMOVE_HOST_FROM_NETWORK"

+ 16 - 0
pro/controllers/users.go

@@ -210,6 +210,7 @@ func inviteUsers(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	for _, inviteeEmail := range inviteReq.UserEmails {
+		inviteeEmail = strings.ToLower(inviteeEmail)
 		// check if user with email exists, then ignore
 		if !email.IsValid(inviteeEmail) {
 			logic.ReturnErrorResponse(w, r, logic.FormatError(errors.New("invalid email "+inviteeEmail), "badrequest"))
@@ -362,6 +363,21 @@ func deleteAllUserInvites(w http.ResponseWriter, r *http.Request) {
 		logic.ReturnErrorResponse(w, r, logic.FormatError(errors.New("failed to delete all pending user invites "+err.Error()), "internal"))
 		return
 	}
+	logic.LogEvent(&models.Event{
+		Action: models.DeleteAll,
+		Source: models.Subject{
+			ID:   r.Header.Get("user"),
+			Name: r.Header.Get("user"),
+			Type: models.UserSub,
+		},
+		TriggeredBy: r.Header.Get("user"),
+		Target: models.Subject{
+			ID:   "All Invites",
+			Name: "All Invites",
+			Type: models.UserInviteSub,
+		},
+		Origin: models.Dashboard,
+	})
 	logic.ReturnSuccessResponse(w, r, "cleared all pending user invites")
 }
 

+ 1 - 1
pro/initialize.go

@@ -95,7 +95,7 @@ func InitPro() {
 		proLogic.InitFailOverCache()
 		auth.StartSyncHook()
 		email.Init()
-		proLogic.EventWatcher()
+		go proLogic.EventWatcher()
 	})
 	logic.ResetFailOver = proLogic.ResetFailOver
 	logic.ResetFailedOverPeer = proLogic.ResetFailedOverPeer

+ 3 - 0
scripts/netmaker.default.env

@@ -102,3 +102,6 @@ STUN=true
 METRICS_PORT=51821
 # Metrics Collection interval in minutes
 PUBLISH_METRIC_INTERVAL=15
+# auto delete offline nodes
+AUTO_DELETE_OFFLINE_NODES=false
+

+ 4 - 0
servercfg/serverconf.go

@@ -875,3 +875,7 @@ func GetAllowedEmailDomains() string {
 func GetNmBaseDomain() string {
 	return os.Getenv("NM_DOMAIN")
 }
+
+func IsAutoCleanUpEnabled() bool {
+	return os.Getenv("AUTO_DELETE_OFFLINE_NODES") == "true"
+}