
Add dashboard deps and build to makefile
Add mitigation to analytics collection for HELO spamming

Jordan Schalm 8 years ago
parent
commit a0e78f72c0
3 changed files with 30 additions and 7 deletions
  1. Makefile (+5 -1)
  2. dashboard/dashboard.go (+8 -1)
  3. dashboard/datastore.go (+17 -5)

+ 5 - 1
Makefile

@@ -20,9 +20,13 @@ clean:
 dependencies:
 	$(GO_VARS) $(GO) list -f='{{ join .Deps "\n" }}' $(ROOT)/cmd/guerrillad | grep -v $(ROOT) | tr '\n' ' ' | $(GO_VARS) xargs $(GO) get -u -v
 	$(GO_VARS) $(GO) list -f='{{ join .Deps "\n" }}' $(ROOT)/cmd/guerrillad | grep -v $(ROOT) | tr '\n' ' ' | $(GO_VARS) xargs $(GO) install -v
+	cd dashboard/js && npm install && cd ../..
 
-guerrillad: *.go */*.go */*/*.go
+dashboard: dashboard/*
+	cd dashboard/js && npm run build && cd ../..
 	statik -src=dashboard/js/build -dest=dashboard
+
+guerrillad: *.go */*.go */*/*.go
 	$(GO_VARS) $(GO) build -o="guerrillad" -ldflags="$(LD_FLAGS)" $(ROOT)/cmd/guerrillad
 
 test: *.go */*.go */*/*.go
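
The new dashboard target builds the JavaScript bundle with npm and then runs statik to embed the build output in a generated Go package, so the dashboard assets can be compiled into the guerrillad binary. As a rough, hypothetical sketch of how statik-embedded assets are typically served (the generated import path github.com/flashmob/go-guerrilla/dashboard/statik and the listen address are assumptions, not shown in this diff):

    package main

    import (
    	"log"
    	"net/http"

    	"github.com/rakyll/statik/fs"

    	// Assumed import path of the package generated by
    	// `statik -src=dashboard/js/build -dest=dashboard`.
    	_ "github.com/flashmob/go-guerrilla/dashboard/statik"
    )

    func main() {
    	// fs.New returns an http.FileSystem backed by the assets statik
    	// embedded at build time.
    	statikFS, err := fs.New()
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Serve the embedded dashboard bundle.
    	http.Handle("/", http.FileServer(statikFS))
    	log.Fatal(http.ListenAndServe(":8081", nil))
    }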

+ 8 - 1
dashboard/dashboard.go

@@ -33,6 +33,11 @@ type Config struct {
 	MaxWindow string `json:"max_window"`
 	// Granularity for which rankings are aggregated
 	RankingUpdateInterval string `json:"ranking_aggregation_interval"`
+	// Ratio of unique HELOs to connections in the current aggregation buffer
+	// above which we stop collecting data, to mitigate memory exhaustion attacks.
+	// Number between 0 and 1; set to >1 to never stop collecting data.
+	// Default is 0.8.
+	UniqueHeloRatioMax float64 `json:"unique_helo_ratio"`
 }
 
 // Begin collecting data and listening for dashboard clients
@@ -50,7 +55,6 @@ func Run(c *Config) {
 
 	go dataListener(tickInterval)
 	go store.rankingManager()
-	log.Info("hi")
 
 	err := http.ListenAndServe(c.ListenInterface, r)
 	log.WithError(err).Error("Dashboard server failed to start")
@@ -78,6 +82,9 @@ func applyConfig(c *Config) {
 			tickInterval = ti
 		}
 	}
+	if config.UniqueHeloRatioMax > 0 {
+		uniqueHeloRatioMax = config.UniqueHeloRatioMax
+	}
 
 	maxTicks = int(maxWindow * tickInterval)
 	nRankingBuffers = int(maxWindow / rankingUpdateInterval)
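
Note that applyConfig only overrides the default ratio (0.8) when the configured value is greater than zero, so omitting unique_helo_ratio keeps the default. Below is a minimal, self-contained sketch of how the new knob rides along in the JSON config; the field and tag names come from the diff above, while the surrounding program and the example values are assumptions:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Mirrors the fields shown in the diff; the real Config has more fields.
    type Config struct {
    	MaxWindow             string  `json:"max_window"`
    	RankingUpdateInterval string  `json:"ranking_aggregation_interval"`
    	UniqueHeloRatioMax    float64 `json:"unique_helo_ratio"`
    }

    func main() {
    	// Example values only; 0.9 allows up to 90% unique HELOs per buffer,
    	// and anything > 1 effectively disables the mitigation.
    	raw := []byte(`{"max_window": "24h", "ranking_aggregation_interval": "6h", "unique_helo_ratio": 0.9}`)

    	var c Config
    	if err := json.Unmarshal(raw, &c); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%+v\n", c)
    }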

+ 17 - 5
dashboard/datastore.go

@@ -17,10 +17,10 @@ const (
 )
 
 var (
-	// Log for sending client events from the server to the dashboard.
 	tickInterval          = time.Second * 5
 	maxWindow             = time.Hour * 24
 	rankingUpdateInterval = time.Hour * 6
+	uniqueHeloRatioMax    = 0.8
 	maxTicks              = int(maxWindow / tickInterval)
 	nRankingBuffers       = int(maxWindow / rankingUpdateInterval)
 	LogHook               = logHook(1)
@@ -40,10 +40,12 @@ type dataStore struct {
 	// List of samples of number of connected clients
 	nClientTicks []point
 	// Up-to-date number of clients
-	nClients  uint64
-	topDomain bufferedRanking
-	topHelo   bufferedRanking
-	topIP     bufferedRanking
+	nClients uint64
+	// Total number of clients in the current aggregation buffer
+	nClientsInBuffer uint64
+	topDomain        bufferedRanking
+	topHelo          bufferedRanking
+	topIP            bufferedRanking
 	// For notifying the store about new connections
 	newConns chan conn
 	subs     map[string]chan<- *message
@@ -86,7 +88,16 @@ func (ds *dataStore) rankingManager() {
 	for {
 		select {
 		case c := <-ds.newConns:
+			nHelos := len(ds.topHelo[0])
+			if nHelos > 5 &&
+				float64(nHelos)/float64(ds.nClientsInBuffer) > uniqueHeloRatioMax {
+				// If the ratio of unique HELO messages to total clients in this
+				// buffer is too high, stop collecting data until we roll over
+				// into the next aggregation buffer.
+				continue
+			}
 			ds.lock.Lock()
+			ds.nClientsInBuffer++
 			ds.topDomain[0][c.domain]++
 			ds.topHelo[0][c.helo]++
 			ds.topIP[0][c.ip]++
@@ -95,6 +106,7 @@ func (ds *dataStore) rankingManager() {
 		case <-ticker.C:
 			ds.lock.Lock()
 			// Add empty map at index 0 and shift other maps one down
+			ds.nClientsInBuffer = 0
 			ds.topDomain = append(
 				[]map[string]int{map[string]int{}},
 				ds.topDomain[:len(ds.topDomain)-1]...)
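
The mitigation itself is simple: once the current aggregation buffer has seen more than 5 unique HELOs and their ratio to the total connections recorded in that buffer exceeds uniqueHeloRatioMax, new connections are dropped from the analytics until the ticker rolls the buffers over (which also resets nClientsInBuffer). A standalone, illustrative sketch of that decision, with hypothetical names and values:

    package main

    import "fmt"

    // skipCollection reports whether analytics collection should pause for the
    // current aggregation buffer: more than 5 unique HELOs have been seen and
    // their ratio to total connections in the buffer exceeds maxRatio.
    func skipCollection(uniqueHelos int, connsInBuffer uint64, maxRatio float64) bool {
    	if uniqueHelos <= 5 || connsInBuffer == 0 {
    		return false
    	}
    	return float64(uniqueHelos)/float64(connsInBuffer) > maxRatio
    }

    func main() {
    	fmt.Println(skipCollection(4, 5, 0.8))    // false: too few HELOs to judge
    	fmt.Println(skipCollection(90, 100, 0.8)) // true: 0.90 > 0.8, likely HELO spamming
    	fmt.Println(skipCollection(50, 100, 0.8)) // false: 0.50 <= 0.8
    }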