Browse Source

more structured logging

flashmob 5 years ago
parent
commit
7f22655671

+ 5 - 6
api_test.go

@@ -457,9 +457,8 @@ var funkyLogger = func() backends.Decorator {
 			func(e *mail.Envelope, task backends.SelectTask) (backends.Result, error) {
 				if task == backends.TaskValidateRcpt {
 					// log the last recipient appended to e.Rcpt
-					backends.Log().Infof(
-						"another funky recipient [%s]",
-						e.RcptTo[len(e.RcptTo)-1])
+					backends.Log().Fields("recipient", e.RcptTo[len(e.RcptTo)-1]).Info(
+						"another funky recipient")
 					// if valid then forward call to the next processor in the chain
 					return p.Process(e, task)
 				} else if task == backends.TaskSaveMail {
@@ -1052,7 +1051,7 @@ func TestStreamProcessor(t *testing.T) {
 	}
 
 	// lets check for fingerprints
-	if strings.Index(string(b), "Debug stream") < 0 {
+	if strings.Index(string(b), "debug stream") < 0 {
 		t.Error("did not log: debug stream")
 	}
 
@@ -1131,7 +1130,7 @@ func TestStreamProcessorBackground(t *testing.T) {
 	}
 	time.Sleep(time.Second * 2)
 	// lets check for fingerprints
-	if strings.Index(string(b), "Debug stream") < 0 {
+	if strings.Index(string(b), "debug stream") < 0 {
 		t.Error("did not log: debug stream")
 	}
 
@@ -1445,7 +1444,7 @@ func TestStreamMimeProcessor(t *testing.T) {
 	}
 
 	// lets check for fingerprints
-	if strings.Index(string(b), "Debug stream") < 0 {
+	if strings.Index(string(b), "debug stream") < 0 {
 		t.Error("did not log: debug stream")
 	}
 

+ 5 - 5
backends/p_debugger.go

@@ -47,14 +47,14 @@ func Debugger() Decorator {
 		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
 			if task == TaskSaveMail {
 				if config.LogReceivedMails {
-					Log().Infof("Mail from: %s / to: %v", e.MailFrom.String(), e.RcptTo)
-					Log().Info("Headers are:", e.Header)
-					Log().Info("Body:", e.Data.String())
+					Log().Fields("queuedID", e.QueuedId, "from", e.MailFrom.String(), "to", e.RcptTo).Info("save mail")
+					Log().Fields("queuedID", e.QueuedId, "headers", e.Header).Info("headers dump")
+					Log().Fields("queuedID", e.QueuedId, "body", e.Data.String()).Info("body dump")
 				}
 				if config.SleepSec > 0 {
-					Log().Infof("sleeping for %d", config.SleepSec)
+					Log().Fields("queuedID", e.QueuedId, "sleep", config.SleepSec).Info("sleeping")
 					time.Sleep(time.Second * time.Duration(config.SleepSec))
-					Log().Infof("woke up")
+					Log().Fields("queuedID", e.QueuedId).Info("woke up")
 
 					if config.SleepSec == 1 {
 						panic("panic on purpose")

+ 16 - 12
backends/p_guerrilla_db_redis.go

@@ -186,24 +186,23 @@ func (g *GuerrillaDBAndRedisBackend) doQuery(c int, db *sql.DB, insertStmt *sql.
 	var execErr error
 	defer func() {
 		if r := recover(); r != nil {
-			//logln(1, fmt.Sprintf("Recovered in %v", r))
-			Log().Error("Recovered form panic:", r, string(debug.Stack()))
 			sum := 0
 			for _, v := range *vals {
 				if str, ok := v.(string); ok {
 					sum = sum + len(str)
 				}
 			}
-			Log().Errorf("panic while inserting query [%s] size:%d, err %v", r, sum, execErr)
+			Log().Fields("panic", fmt.Sprintf("%v", r),
+				"size", sum,
+				"error", execErr,
+				"stack", string(debug.Stack())).
+				Error("panic while inserting query")
 			panic("query failed")
 		}
 	}()
 	// prepare the query used to insert when rows reaches batchMax
 	insertStmt = g.prepareInsertQuery(c, db)
 	_, execErr = insertStmt.Exec(*vals...)
-	//if rand.Intn(2) == 1 {
-	//	return errors.New("uggabooka")
-	//}
 	if execErr != nil {
 		Log().WithError(execErr).Error("There was a problem with the insert")
 	}
@@ -248,7 +247,7 @@ func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(
 				// retry the sql query
 				attempts := 3
 				for i := 0; i < attempts; i++ {
-					Log().Infof("retrying query query rows[%c] ", c)
+					Log().Fields("rows", c).Info("retrying query")
 					time.Sleep(time.Second)
 					err = g.doQuery(c, db, insertStmt, &vals)
 					if err == nil {
@@ -274,7 +273,7 @@ func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(
 		select {
 		// it may panic when reading on a closed feeder channel. feederOK detects if it was closed
 		case <-stop:
-			Log().Infof("MySQL query batcher stopped (#%d)", batcherId)
+			Log().Fields("batcherID", batcherId).Info("MySQL query batcher stopped")
 			// Insert any remaining rows
 			inserter(count)
 			feederOk = false
@@ -284,7 +283,12 @@ func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(
 
 			vals = append(vals, row...)
 			count++
-			Log().Debug("new feeder row:", row, " cols:", len(row), " count:", count, " worker", batcherId)
+			Log().Fields(
+				"row", row,
+				"cols", len(row),
+				"count", count,
+				"worker", batcherId,
+			).Debug("new feeder row")
 			if count >= GuerrillaDBAndRedisBatchMax {
 				inserter(GuerrillaDBAndRedisBatchMax)
 			}
@@ -378,7 +382,7 @@ func GuerrillaDbRedis() Decorator {
 			// we loop so that if insertQueryBatcher panics, it can recover and go in again
 			for {
 				if feederOK := g.insertQueryBatcher(feeder, db, qbID, stop); !feederOK {
-					Log().Debugf("insertQueryBatcher exited (#%d)", qbID)
+					Log().Fields("qbID", qbID).Debug("insertQueryBatcher exited")
 					return
 				}
 				Log().Debug("resuming insertQueryBatcher")
@@ -394,7 +398,7 @@ func GuerrillaDbRedis() Decorator {
 			if err := db.Close(); err != nil {
 				Log().WithError(err).Error("close sql database")
 			} else {
-				Log().Infof("closed sql database")
+				Log().Info("closed sql database")
 			}
 		}
 
@@ -402,7 +406,7 @@ func GuerrillaDbRedis() Decorator {
 			if err := redisClient.conn.Close(); err != nil {
 				Log().WithError(err).Error("close redis failed")
 			} else {
-				Log().Infof("closed redis")
+				Log().Info("closed redis")
 			}
 		}
 		// send a close signal to all query batchers to exit.

+ 7 - 2
backends/p_sql.go

@@ -142,14 +142,19 @@ func (s *SQLProcessor) prepareInsertQuery(rows int, db *sql.DB) *sql.Stmt {
 func (s *SQLProcessor) doQuery(c int, db *sql.DB, insertStmt *sql.Stmt, vals *[]interface{}) (execErr error) {
 	defer func() {
 		if r := recover(); r != nil {
-			Log().Error("Recovered form panic:", r, string(debug.Stack()))
 			sum := 0
 			for _, v := range *vals {
 				if str, ok := v.(string); ok {
 					sum = sum + len(str)
 				}
 			}
-			Log().Errorf("panic while inserting query [%s] size:%d, err %v", r, sum, execErr)
+			Log().Fields(
+				"panic", fmt.Sprintf("%v", r),
+				"size", sum,
+				"error", execErr,
+				"stack", string(debug.Stack()),
+			).
+				Error("panic while inserting query")
 			panic("query failed")
 		}
 	}()

+ 6 - 6
backends/s_debug.go

@@ -1,7 +1,6 @@
 package backends
 
 import (
-	"fmt"
 	"github.com/flashmob/go-guerrilla/mail"
 	"time"
 )
@@ -34,24 +33,25 @@ type streamDebuggerConfig struct {
 func StreamDebug() *StreamDecorator {
 	sd := &StreamDecorator{}
 	var config streamDebuggerConfig
+	var envelope *mail.Envelope
 	sd.Configure = func(cfg ConfigGroup) error {
 		return sd.ExtractConfig(cfg, &config)
 	}
 	sd.Decorate =
 		func(sp StreamProcessor, a ...interface{}) StreamProcessor {
 			sd.Open = func(e *mail.Envelope) error {
+				envelope = e
 				return nil
 			}
 			return StreamProcessWith(func(p []byte) (int, error) {
-				str := string(p)
+
 				if config.LogReads {
-					fmt.Print(str)
-					Log().WithField("p", string(p)).Info("Debug stream")
+					Log().Fields("queuedID", envelope.QueuedId, "payload", string(p)).Info("debug stream")
 				}
 				if config.SleepSec > 0 {
-					Log().Infof("sleeping for %d", config.SleepSec)
+					Log().Fields("queuedID", envelope.QueuedId, "sleep", config.SleepSec).Info("sleeping")
 					time.Sleep(time.Second * time.Duration(config.SleepSec))
-					Log().Infof("woke up")
+					Log().Fields("queuedID", envelope.QueuedId).Info("woke up")
 
 					if config.SleepSec == 1 {
 						panic("panic on purpose")

+ 9 - 8
cmd/guerrillad/serve.go

@@ -47,7 +47,7 @@ func init() {
 	var err error
 	mainlog, err = log.GetLogger(log.OutputStderr.String(), log.InfoLevel.String())
 	if err != nil && mainlog != nil {
-		mainlog.WithError(err).Errorf("Failed creating a logger to %s", log.OutputStderr)
+		mainlog.Fields("error", err, "file", log.OutputStderr).Error("failed creating a logger")
 	}
 	cfgFile := "goguerrilla.conf" // deprecated default name
 	if _, err := os.Stat(cfgFile); err != nil {
@@ -83,7 +83,7 @@ func sigHandler() {
 				mainlog.WithError(err).Error("reopening logs failed")
 			}
 		} else if sig == syscall.SIGTERM || sig == syscall.SIGQUIT || sig == syscall.SIGINT || sig == os.Kill {
-			mainlog.Infof("Shutdown signal caught")
+			mainlog.Info("shutdown signal caught")
 			go func() {
 				select {
 				// exit if graceful shutdown not finished in 60 sec.
@@ -93,10 +93,10 @@ func sigHandler() {
 				}
 			}()
 			d.Shutdown()
-			mainlog.Infof("Shutdown completed, exiting.")
+			mainlog.Info("shutdown completed, exiting")
 			return
 		} else {
-			mainlog.Infof("Shutdown, unknown signal caught")
+			mainlog.Info("shutdown, unknown signal caught")
 			return
 		}
 	}
@@ -107,19 +107,20 @@ func serve(cmd *cobra.Command, args []string) {
 	d = guerrilla.Daemon{Logger: mainlog}
 	c, err := readConfig(configPath, pidFile)
 	if err != nil {
-		mainlog.WithError(err).Fatal("Error while reading config")
+		mainlog.WithError(err).Fatal("error while reading config")
 	}
 	_ = d.SetConfig(*c)
 
 	// Check that max clients is not greater than system open file limit.
 	if ok, maxClients, fileLimit := guerrilla.CheckFileLimit(c); !ok {
-		mainlog.Fatalf("Combined max clients for all servers (%d) is greater than open file limit (%d). "+
-			"Please increase your open file limit or decrease max clients.", maxClients, fileLimit)
 		mainlog.Fields("maxClients", maxClients, "fileLimit", fileLimit).
+			Fatal("combined max clients for all servers is greater than open file limit, " +
+				"please increase your open file limit or decrease max clients")
 	}
 
 	err = d.Start()
 	if err != nil {
-		mainlog.WithError(err).Error("Error(s) when creating new server(s)")
+		mainlog.WithError(err).Error("error(s) when creating new server(s)")
 		os.Exit(1)
 	}
 	sigHandler()

+ 30 - 20
cmd/guerrillad/serve_test.go

@@ -338,14 +338,19 @@ const testPauseDuration = time.Millisecond * 1010
 // reload config
 func sigHup(pidfile string) {
 	if data, err := ioutil.ReadFile(pidfile); err == nil {
-		mainlog.Infof("pid read is %s", data)
-		ecmd := exec.Command("kill", "-HUP", string(data))
-		_, err = ecmd.Output()
-		if err != nil {
-			mainlog.Infof("could not SIGHUP", err)
+		if pid, err := strconv.Atoi(string(data)); err != nil {
+			mainlog.WithError(err).Error("could not read pid")
+		} else {
+			mainlog.Fields("pid", pid).Info("pid read")
+			ecmd := exec.Command("kill", "-HUP", string(data))
+			_, err = ecmd.Output()
+			if err != nil {
+				mainlog.WithError(err).Error("could not SIGHUP")
+			}
 		}
+
 	} else {
-		mainlog.WithError(err).Info("sighup - Could not read pidfle")
+		mainlog.Fields("error", err, "file", pidfile).Info("sighup - Could not read pidfile")
 	}
 
 }
@@ -353,12 +358,17 @@ func sigHup(pidfile string) {
 // shutdown after calling serve()
 func sigKill(pidfile string) {
 	if data, err := ioutil.ReadFile(pidfile); err == nil {
-		mainlog.Infof("pid read is %s", data)
-		ecmd := exec.Command("kill", string(data))
-		_, err = ecmd.Output()
-		if err != nil {
-			mainlog.Infof("could not sigkill", err)
+		if pid, err := strconv.Atoi(string(data)); err != nil {
+			mainlog.WithError(err).Error("could not read pid")
+		} else {
+			mainlog.Fields("pid", pid).Info("pid read")
+			ecmd := exec.Command("kill", string(data))
+			_, err = ecmd.Output()
+			if err != nil {
+				mainlog.WithError(err).Info("could not sigkill")
+			}
 		}
+
 	} else {
 		mainlog.WithError(err).Info("sigKill - Could not read pidfile")
 	}
@@ -710,7 +720,7 @@ func TestServe(t *testing.T) {
 	// did backend started as expected?
 
 	// wait for shutdown
-	if _, err := grepTestlog("Backend shutdown completed", 0); err != nil {
+	if _, err := grepTestlog("backend shutdown completed", 0); err != nil {
 		t.Error("server didn't stop")
 	}
 
@@ -796,7 +806,7 @@ func TestServerAddEvent(t *testing.T) {
 	}
 
 	if matched, err := matchTestlog(
-		1, "msg", "Backend shutdown completed",
+		1, "msg", "backend shutdown completed",
 	); !matched {
 		t.Error("Server failed to stop", err)
 	}
@@ -873,7 +883,7 @@ func TestServerStartEvent(t *testing.T) {
 	// shutdown and wait for exit
 	d.Shutdown()
 
-	if _, err := grepTestlog("Backend shutdown completed", 0); err != nil {
+	if _, err := grepTestlog("backend shutdown completed", 0); err != nil {
 		t.Error("server didn't stop")
 	}
 
@@ -978,7 +988,7 @@ func TestServerStopEvent(t *testing.T) {
 	d.Shutdown()
 
 	// wait for shutdown
-	if _, err := grepTestlog("Backend shutdown completed", 0); err != nil {
+	if _, err := grepTestlog("backend shutdown completed", 0); err != nil {
 		t.Error("server didn't stop")
 	}
 }
@@ -1112,7 +1122,7 @@ func TestAllowedHostsEvent(t *testing.T) {
 	d.Shutdown()
 
 	// wait for shutdown
-	if _, err := grepTestlog("Backend shutdown completed", 0); err != nil {
+	if _, err := grepTestlog("backend shutdown completed", 0); err != nil {
 		t.Error("server didn't stop")
 	}
 
@@ -1222,7 +1232,7 @@ func TestTLSConfigEvent(t *testing.T) {
 	sigHup("pidfile.pid")
 
 	// wait for config to reload
-	if _, err := grepTestlog("Server [127.0.0.1:4655] re-opened", 0); err != nil {
+	if _, err := matchTestlog(1, "iface", "127.0.0.1:4655", "msg", "server re-opened"); err != nil {
 		t.Error("server didn't catch sighup")
 	}
 
@@ -1243,7 +1253,7 @@ func TestTLSConfigEvent(t *testing.T) {
 	d.Shutdown()
 
 	// wait for shutdown
-	if _, err := grepTestlog("Backend shutdown completed", 0); err != nil {
+	if _, err := grepTestlog("backend shutdown completed", 0); err != nil {
 		t.Error("server didn't stop")
 	}
 
@@ -1401,7 +1411,7 @@ func TestBadTLSReload(t *testing.T) {
 	d.Shutdown()
 
 	// wait for shutdown
-	if _, err := grepTestlog("Backend shutdown completed", 0); err != nil {
+	if _, err := grepTestlog("backend shutdown completed", 0); err != nil {
 		t.Error("server didn't stop")
 	}
 
@@ -1487,7 +1497,7 @@ func TestSetTimeoutEvent(t *testing.T) {
 	d.Shutdown()
 
 	// wait for shutdown
-	if _, err := grepTestlog("Backend shutdown completed", 0); err != nil {
+	if _, err := grepTestlog("backend shutdown completed", 0); err != nil {
 		t.Error("server didn't stop")
 	}
 	// so the connection we have opened should timeout by now

+ 5 - 3
cmd/guerrillad/version.go

@@ -20,7 +20,9 @@ func init() {
 }
 
 func logVersion() {
-	mainlog.Infof("guerrillad %s", guerrilla.Version)
-	mainlog.Debugf("Build Time: %s", guerrilla.BuildTime)
-	mainlog.Debugf("Commit:     %s", guerrilla.Commit)
+	mainlog.Fields(
+		"version", guerrilla.Version,
+		"buildTime", guerrilla.BuildTime,
+		"commit", guerrilla.Commit).
+		Info("guerrillad")
 }

+ 88 - 40
guerrilla.go

@@ -168,14 +168,16 @@ func (g *guerrilla) makeServers() error {
 			continue
 		}
 		if err := sc.Validate(); err != nil {
-			g.mainlog().WithError(errs).Errorf("Failed to create server [%s]", sc.ListenInterface)
+			g.mainlog().Fields("error", errs, "iface", sc.ListenInterface).
+				Error("failed to create server")
 			errs = append(errs, err)
 			continue
 		} else {
 			sc := sc // pin!
 			server, err := newServer(&sc, g.backend(sc.Gateway), g.mainlog(), serverID)
 			if err != nil {
-				g.mainlog().WithError(err).Errorf("Failed to create server [%s]", sc.ListenInterface)
+				g.mainlog().Fields("error", err, "iface", sc.ListenInterface).
+					Error("failed to create server")
 				errs = append(errs, err)
 			}
 			if server != nil {
@@ -335,9 +337,11 @@ func (g *guerrilla) subscribeEvents() {
 
 	// add a new server to the config & start
 	events[EventConfigServerNew] = serverEvent(func(sc *ServerConfig) {
-		g.mainlog().Fields("iface", sc.ListenInterface, "event", EventConfigServerNew).Debug("event fired")
+		values := []interface{}{"iface", sc.ListenInterface, "event", EventConfigServerNew}
+		g.mainlog().Fields(values...).
+			Debug("event fired")
 		if _, err := g.findServer(sc.ListenInterface); err != nil {
-			values := []interface{}{"iface", sc.ListenInterface, "event", EventConfigServerNew}
+
 			// not found, lets add it
 			if err := g.makeServers(); err != nil {
 				g.mainlog().Fields(append(values, "error", err)...).
@@ -349,23 +353,29 @@ func (g *guerrilla) subscribeEvents() {
 				err := g.Start()
 				if err != nil {
 					g.mainlog().Fields(append(values, "error", err)...).
-						Error("Event server_change:new_server returned errors when starting")
+						Error("new server errors when starting")
 				}
 			}
 		} else {
-			g.mainlog().Fields("event", EventConfigServerNew).Debug("new event, but server already fund")
+			g.mainlog().Fields(values...).
 				Debug("new event, but server already found")
 		}
 	})
 
 	// start a server that already exists in the config and has been enabled
 	events[EventConfigServerStart] = serverEvent(func(sc *ServerConfig) {
 		if server, err := g.findServer(sc.ListenInterface); err == nil {
+			fields := []interface{}{
+				"iface", server.listenInterface,
+				"serverID", server.serverID,
+				"event", EventConfigServerStart}
 			if server.state == ServerStateStopped || server.state == ServerStateNew {
-				g.mainlog().Fields("iface", server.listenInterface, "serverID", server.serverID).
-					Info("Starting server")
+				g.mainlog().Fields(fields...).
+					Info("starting server")
 				err := g.Start()
 				if err != nil {
-					g.mainlog().WithError(err).Info("Event server_change:start_server returned errors when starting")
+					g.mainlog().Fields(append(fields, "error", err)...).
+						Info("event server_change:start_server returned errors when starting")
 				}
 			}
 		}
@@ -376,7 +386,11 @@ func (g *guerrilla) subscribeEvents() {
 		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			if server.state == ServerStateRunning {
 				server.Shutdown()
-				g.mainlog().Infof("Server [%s] stopped.", sc.ListenInterface)
+				g.mainlog().Fields(
+					"event", EventConfigServerStop,
+					"server", sc.ListenInterface,
+					"serverID", server.serverID).
 				Info("server stopped")
 			}
 		}
 	})
@@ -386,24 +400,40 @@ func (g *guerrilla) subscribeEvents() {
 		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			server.Shutdown()
 			g.removeServer(sc.ListenInterface)
-			g.mainlog().Infof("Server [%s] removed from config, stopped it.", sc.ListenInterface)
+			g.mainlog().Fields(
+				"event", EventConfigServerRemove,
+				"server", sc.ListenInterface,
+				"serverID", server.serverID).
+				Info("server removed from config, stopped it")
 		}
 	})
 
 	// TLS changes
 	events[EventConfigServerTLSConfig] = serverEvent(func(sc *ServerConfig) {
 		if server, err := g.findServer(sc.ListenInterface); err == nil {
+			fields := []interface{}{
+				"iface", server.listenInterface,
+				"serverID", server.serverID,
+				"event", EventConfigServerTLSConfig}
 			if err := server.configureTLS(); err == nil {
-				g.mainlog().Infof("Server [%s] new TLS configuration loaded", sc.ListenInterface)
+				g.mainlog().Fields(fields...).Info("server new TLS configuration loaded")
 			} else {
-				g.mainlog().WithError(err).Errorf("Server [%s] failed to load the new TLS configuration", sc.ListenInterface)
+				g.mainlog().Fields(append(fields, "error", err)...).
 					Error("server failed to load the new TLS configuration")
 			}
 		}
 	})
 	// when server's timeout change.
 	events[EventConfigServerTimeout] = serverEvent(func(sc *ServerConfig) {
 		g.mapServers(func(server *server) {
+			fields := []interface{}{
+				"iface", server.listenInterface,
+				"serverID", server.serverID,
+				"event", EventConfigServerTimeout,
+				"timeout", sc.Timeout,
+			}
 			server.setTimeout(sc.Timeout)
+			g.mainlog().Fields(fields...).Info("server timeout set")
 		})
 	})
 	// when server's max clients change.
@@ -418,37 +448,42 @@ func (g *guerrilla) subscribeEvents() {
 			var err error
 			var l log.Logger
 			level := g.mainlog().GetLevel()
+			fields := []interface{}{
+				"iface", server.listenInterface,
+				"serverID", server.serverID,
+				"event", EventConfigServerLogFile,
+				"file", sc.LogFile,
+			}
 			if l, err = log.GetLogger(sc.LogFile, level); err == nil {
 				g.setMainlog(l)
 				backends.Svc.SetMainlog(l)
 				// it will change to the new logger on the next accepted client
 				server.logStore.Store(l)
-				g.mainlog().Infof("Server [%s] changed, new clients will log to: [%s]",
 				g.mainlog().Fields(fields...).Info("server log changed")
 			} else {
-				g.mainlog().WithError(err).Errorf(
-					"Server [%s] log change failed to: [%s]",
-					sc.ListenInterface,
-					sc.LogFile,
-				)
+				g.mainlog().Fields(append(fields, "error", err)...).Error(
+					"server log change failed")
 			}
 		}
 	})
 	// when the daemon caught a sighup, event for individual server
 	events[EventConfigServerLogReopen] = serverEvent(func(sc *ServerConfig) {
 		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			fields := []interface{}{
 				"iface", sc.ListenInterface,
 				"serverID", server.serverID,
 				"file", sc.LogFile,
 				"event", EventConfigServerLogReopen}
 			if err = server.log().Reopen(); err != nil {
 				g.mainlog().Fields(
-					"error", err,
-					"file", sc.LogFile,
-					"iface", sc.ListenInterface,
-					"serverID", server.serverID).
+					append(fields, "error", err)...).
 					Error("server log file failed to re-open")
 				return
 			}
-			g.mainlog().Infof("Server [%s] re-opened log file [%s]", sc.ListenInterface, sc.LogFile)
+			g.mainlog().Fields(fields...).Info("server re-opened log file")
 		}
 	})
 
@@ -456,7 +491,8 @@ func (g *guerrilla) subscribeEvents() {
 	events[EventConfigServerGatewayConfig] = serverEvent(func(sc *ServerConfig) {
 		b := g.backend(sc.Gateway)
 		if b == nil {
-			g.mainlog().Fields("gateway", sc.Gateway).Error("could not change to gateway, not configured")
+			g.mainlog().Fields("gateway", sc.Gateway, "event", EventConfigServerGatewayConfig).
+				Error("could not change to gateway, not configured")
 			return
 		}
 		g.storeBackend(b)
@@ -464,18 +500,22 @@ func (g *guerrilla) subscribeEvents() {
 
 	revertIfError := func(err error, name string, logger log.Logger, g *guerrilla) {
 		if err != nil {
-			logger.Fields("error", err, "gateway", name).Error("cannot change gateway config, reverting to old config")
+			logger.Fields("error", err, "gateway", name, "event", EventConfigServerGatewayConfig).
+				Error("cannot change gateway config, reverting to old config")
 			err = g.backend(name).Reinitialize()
 			if err != nil {
-				logger.Fields("error", err, "gateway", name).Error("failed to revert to old gateway config")
+				logger.Fields("error", err, "gateway", name, "event", EventConfigServerGatewayConfig).
+					Error("failed to revert to old gateway config")
 				return
 			}
 			err = g.backend(name).Start()
 			if err != nil {
-				logger.Fields("error", err, "gateway", name).Error("failed to start gateway with old config")
+				logger.Fields("error", err, "gateway", name, "event", EventConfigServerGatewayConfig).
+					Error("failed to start gateway with old config")
 				return
 			}
-			logger.Fields("gateway", name).Info("reverted to old gateway config")
+			logger.Fields("gateway", name, "event", EventConfigServerGatewayConfig).
+				Info("reverted to old gateway config")
 		}
 	}
 
@@ -484,7 +524,8 @@ func (g *guerrilla) subscribeEvents() {
 		var err error
 		// shutdown the backend first.
 		if err = g.backend(name).Shutdown(); err != nil {
-			logger.Fields("error", err, "gateway", name).Error("gateway failed to shutdown")
+			logger.Fields("error", err, "gateway", name, "event", EventConfigBackendConfigChanged).
+				Error("gateway failed to shutdown")
 			return // we can't do anything then
 		}
 		if newBackend, newErr := backends.New(name, appConfig.BackendConfig, logger); newErr != nil {
@@ -493,11 +534,13 @@ func (g *guerrilla) subscribeEvents() {
 			return
 		} else {
 			if err = newBackend.Start(); err != nil {
-				logger.Fields("error", err, "gateway", name).Error("gateway could not start")
+				logger.Fields("error", err, "gateway", name, "event", EventConfigBackendConfigChanged).
+					Error("gateway could not start")
 				revertIfError(err, name, logger, g) // revert to old backend
 				return
 			} else {
-				logger.Fields("gateway", name).Info("gateway with new config started")
+				logger.Fields("gateway", name, "event", EventConfigBackendConfigChanged).
+					Info("gateway with new config started")
 				g.storeBackend(newBackend)
 			}
 		}
@@ -508,11 +551,13 @@ func (g *guerrilla) subscribeEvents() {
 		logger, _ := log.GetLogger(appConfig.LogFile, appConfig.LogLevel)
 		// shutdown any old backend first.
 		if newBackend, newErr := backends.New(name, appConfig.BackendConfig, logger); newErr != nil {
-			logger.Fields("error", newErr, "gateway", name).Error("cannot add new gateway")
+			logger.Fields("error", newErr, "gateway", name, "event", EventConfigBackendConfigAdded).
+				Error("cannot add new gateway")
 		} else {
 			// swap to the new gateway (assuming old gateway was shutdown so it can be safely swapped)
 			if err := newBackend.Start(); err != nil {
-				logger.Fields("error", err, "gateway", name).Error("cannot start new gateway")
+				logger.Fields("error", err, "gateway", name, "event", EventConfigBackendConfigAdded).
+					Error("cannot start new gateway")
 			}
 			logger.Fields("gateway", name).Info("new gateway started")
 			g.storeBackend(newBackend)
@@ -527,18 +572,21 @@ func (g *guerrilla) subscribeEvents() {
 		// revert
 		defer revertIfError(err, name, logger, g)
 		if err = g.backend(name).Shutdown(); err != nil {
-			logger.Fields("error", err, "gateway", name).Warn("gateway failed to shutdown")
+			logger.Fields("error", err, "gateway", name, "event", EventConfigBackendConfigRemoved).
+				Error("gateway failed to shutdown")
 			return
 		}
 		g.removeBackend(g.backend(name))
-		logger.Fields("gateway", name).Info("gateway removed")
+		logger.Fields("gateway", name, "event", EventConfigBackendConfigRemoved).Info("gateway removed")
 	})
 
+	// subscribe all of the above events
 	var err error
 	for topic, fn := range events {
 		err = g.Subscribe(topic, fn)
 		if err != nil {
-			g.mainlog().WithError(err).Errorf("failed to subscribe on topic [%s]", topic)
+			g.mainlog().Fields("error", err, "event", topic).
+				Error("failed to subscribe on topic")
 			break
 		}
 	}
@@ -657,9 +705,9 @@ func (g *guerrilla) Shutdown() {
 		return b.Shutdown()
 	}); err != nil {
 		fmt.Println(err)
-		g.mainlog().WithError(err).Warn("Backend failed to shutdown")
+		g.mainlog().Fields("error", err).Error("backend failed to shutdown")
 	} else {
-		g.mainlog().Infof("Backend shutdown completed")
+		g.mainlog().Info("backend shutdown completed")
 	}
 }
 
@@ -680,7 +728,7 @@ func (g *guerrilla) writePid() (err error) {
 			}
 		}
 		if err != nil {
-			g.mainlog().Fields("error", err, "file", g.Config.PidFile).Errorf("error while writing pidFile")
+			g.mainlog().Fields("error", err, "file", g.Config.PidFile).Error("error while writing pidFile")
 		}
 	}()
 	if len(g.Config.PidFile) > 0 {

+ 1 - 1
server.go

@@ -416,7 +416,7 @@ func (s *server) handleClient(client *client) {
 		case ClientCmd:
 			client.bufin.setLimit(CommandLineMaxLength)
 			input, err := s.readCommand(client)
-			s.log().Fields("input", string(input)).Debugf("client said")
+			s.log().Fields("input", string(input)).Debug("client said")
 			if err == io.EOF {
 				s.log().Fields("error", err, "peer", client.RemoteIP).Warn("client closed the connection")
 				return

+ 8 - 8
server_test.go

@@ -49,7 +49,7 @@ func getMockServerConn(sc *ServerConfig, t *testing.T) (*mocks.Conn, *server) {
 	var mainlog log.Logger
 	mainlog, logOpenError = log.GetLogger(sc.LogFile, "debug")
 	if logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
+		mainlog.Fields("error", logOpenError, "iface", sc.ListenInterface).Error("Failed creating a logger for mock conn")
 	}
 
 	bcfg := backends.BackendConfig{
@@ -280,7 +280,7 @@ func TestHandleClient(t *testing.T) {
 	sc := getMockServerConfig()
 	mainlog, logOpenError = log.GetLogger(sc.LogFile, "debug")
 	if logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
+		mainlog.Fields("error", logOpenError, "iface", sc.ListenInterface).Error("Failed creating a logger for mock conn")
 	}
 	conn, server := getMockServerConn(sc, t)
 	// call the serve.handleClient() func in a goroutine.
@@ -320,7 +320,7 @@ func TestGithubIssue197(t *testing.T) {
 	sc := getMockServerConfig()
 	mainlog, logOpenError = log.GetLogger(sc.LogFile, "debug")
 	if logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
+		mainlog.Fields("error", logOpenError, "iface", sc.ListenInterface).Error("Failed creating a logger for mock conn")
 	}
 	conn, server := getMockServerConn(sc, t)
 	server.backend().Start()
@@ -422,7 +422,7 @@ func TestGithubIssue198(t *testing.T) {
 	backends.Svc.AddProcessor("custom", customBackend)
 
 	if logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
+		mainlog.Fields("error", logOpenError, "iface", sc.ListenInterface).Error("Failed creating a logger for mock conn")
 	}
 	conn, server := getMockServerConn(sc, t)
 	cfg := backends.BackendConfig{}
@@ -561,7 +561,7 @@ func TestGithubIssue199(t *testing.T) {
 	sc := getMockServerConfig()
 	mainlog, logOpenError = log.GetLogger(sc.LogFile, "debug")
 	if logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
+		mainlog.Fields("error", logOpenError, "iface", sc.ListenInterface).Error("Failed creating a logger for mock conn")
 	}
 	conn, server := getMockServerConn(sc, t)
 	server.backend().Start()
@@ -740,7 +740,7 @@ func TestGithubIssue200(t *testing.T) {
 	sc := getMockServerConfig()
 	mainlog, logOpenError = log.GetLogger(sc.LogFile, "debug")
 	if logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
+		mainlog.Fields("error", logOpenError, "iface", sc.ListenInterface).Error("Failed creating a logger for mock conn")
 	}
 	conn, server := getMockServerConn(sc, t)
 	server.backend().Start()
@@ -792,7 +792,7 @@ func TestGithubIssue201(t *testing.T) {
 	sc := getMockServerConfig()
 	mainlog, logOpenError = log.GetLogger(sc.LogFile, "debug")
 	if logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
+		mainlog.Fields("error", logOpenError, "iface", sc.ListenInterface).Error("Failed creating a logger for mock conn")
 	}
 	conn, server := getMockServerConn(sc, t)
 	server.backend().Start()
@@ -880,7 +880,7 @@ func TestXClient(t *testing.T) {
 	sc.XClientOn = true
 	mainlog, logOpenError = log.GetLogger(sc.LogFile, "debug")
 	if logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
+		mainlog.Fields("error", logOpenError, "iface", sc.ListenInterface).Error("Failed creating a logger for mock conn")
 	}
 	conn, server := getMockServerConn(sc, t)
 	// call the serve.handleClient() func in a goroutine.

+ 1 - 1
tests/guerrilla_test.go

@@ -273,7 +273,7 @@ func TestStart(t *testing.T) {
 			t.Error("Server did not shutdown pool on 127.0.0.1:2526")
 		}
 
-		if !MatchLog(logOutput, 1, "msg", "Backend shutdown completed") {
+		if !MatchLog(logOutput, 1, "msg", "backend shutdown completed") {
 			t.Error("Backend didn't shut down")
 		}
 	}

+ 1 - 1
tests/util.go

@@ -13,7 +13,7 @@ func MatchLog(input string, startLine int, args ...interface{}) bool {
 	}
 	lines := strings.Split(input, "\n")
 	if len(lines) < startLine {
-		panic("log too short, lines:" + string(len(lines)))
+		panic("log too short, lines:" + fmt.Sprintf("%v", len(lines)))
 	}
 	var lookFor string
 	// for each line