
merge backend refactor into dashboard and fix conflicts. Also fix a bug in the dashboard that caused Safari to behave inconsistently with websockets

Jordan Schalm 8 years ago
parent
commit
542726537d

+ 1 - 4
.travis.yml

@@ -23,7 +23,4 @@ install:
 script:
   - ./.travis.gofmt.sh
   - make guerrillad
-  - go test ./tests
-  - go test
-  - go test ./cmd/guerrillad
-  - go test ./response
+  - make test

+ 9 - 0
Makefile

@@ -36,3 +36,12 @@ test: *.go */*.go */*/*.go
 	$(GO_VARS) $(GO) test -v ./tests
 	$(GO_VARS) $(GO) test -v ./cmd/guerrillad
 	$(GO_VARS) $(GO) test -v ./response
+	$(GO_VARS) $(GO) test -v ./backends
+	$(GO_VARS) $(GO) test -v ./mail
+
+testrace: *.go */*.go */*/*.go
+	$(GO_VARS) $(GO) test -v . -race
+	$(GO_VARS) $(GO) test -v ./tests -race
+	$(GO_VARS) $(GO) test -v ./cmd/guerrillad -race
+	$(GO_VARS) $(GO) test -v ./response -race
+	$(GO_VARS) $(GO) test -v ./backends -race

+ 86 - 23
README.md

@@ -14,20 +14,34 @@ It's a small SMTP server written in Go, for the purpose of receiving large volum
 Written for GuerrillaMail.com which processes hundreds of thousands of emails
 every hour.
 
-The purpose of this daemon is to grab the email, save it to the database
-and disconnect as quickly as possible.
+The purpose of this daemon is to grab the email, save it,
+and disconnect as quickly as possible, essentially performing the services of a
+Mail Transfer Agent (MTA).
 
-A typical user of this software would probably want to look into
-`backends/guerrilla_db_redis.go` source file to use as an example to
-customize for their own systems.
+A typical user of this software would probably use it as a package in their own
+Go project in order to receive and deliver email.
 
-This server does not attempt to filter HTML, check for spam or do any
-sender verification. These steps should be performed by other programs,
- (or perhaps your own custom backend?).
-The server does not send any email including bounces.
+Go-Guerrilla allows you to customize how the email is delivered.
+
+Out of the box, Go-Guerrilla does not attempt to filter HTML, check for spam or do any
+sender verification. However, it comes with a modular middleware-like backend system which
+supports a range of different features and ways of delivering email.
+See the list of available _Processors_ below.
 
 The software is using MIT License (MIT) - contributors welcome.
 
+### Features
+
+- Multi-server. The daemon can spawn multiple servers at once, all sharing the same backend
+for saving email.
+- Config hot-reloading. Add/Remove/Enable/Disable servers without restarting. Reload TLS configuration and most other settings on the fly.
+- Graceful shutdown: Minimise loss of email if you need to shutdown/restart.
+- Pooling: The daemon uses pooling where possible. It's friendly to the garbage collector.
+- Modular, component-based backend system for processing email that's easy to extend.
+- Backend system arranged in a producer/consumer type structure, making use of Go's channels.
+- Fuzz tested.
+- Can be used as a package in your Go project.
+
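+A minimal sketch of using Go-Guerrilla as a package with the `Daemon` API (all defaults;
+with no configuration it listens on 127.0.0.1:2525):
+
+```go
+package main
+
+import "github.com/flashmob/go-guerrilla"
+
+func main() {
+	d := guerrilla.Daemon{}
+	if err := d.Start(); err != nil { // built-in defaults; serves on 127.0.0.1:2525
+		panic(err)
+	}
+	// ... receive email, then:
+	d.Shutdown()
+}
+```
+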
 ### Roadmap / Contributing & Bounties
 
 
@@ -172,20 +186,37 @@ Guerrilla SMTPd can also be imported and used as a package in your project.
 ## Import Guerrilla.
 ```go
 import "github.com/flashmob/go-guerrilla"
+
+
 ```
 
 ## Implement the `Backend` interface
 (or use one of the implementations in the `backends` sub-package). This is how
 your application processes emails received by the Guerrilla app.
 ```go
+import "github.com/flashmob/go-guerrilla/mail"
+import "github.com/flashmob/go-guerrilla/backends"
+
 type CustomBackend struct {...}
 
-func (cb *CustomBackend) Process(c *guerrilla.Envelope) guerrilla.BackendResult {
-  err := saveSomewhere(c.Data)
+func (cb *CustomBackend) Process(e *mail.Envelope) backends.Result {
+  err := saveSomewhere(e.NewReader())
   if err != nil {
-    return guerrilla.NewBackendResult(fmt.Sprintf("554 Error: %s", err.Error()))
+    return backends.NewResult(fmt.Sprintf("554 Error: %s", err.Error()))
   }
-  return guerrilla.NewBackendResult("250 OK")
+  return backends.NewResult("250 OK")
+}
+```
+
+## Create a logger
+
+```go
+import "github.com/flashmob/go-guerrilla/log"
+
+mainlog, err := log.GetLogger(log.OutputStderr.String())
+if err != nil {
+    fmt.Println("Cannot open log:", err)
+    os.Exit(1)
 }
 ```
 
@@ -193,11 +224,11 @@ func (cb *CustomBackend) Process(c *guerrilla.Envelope) guerrilla.BackendResult
 See Configuration section below for setting configuration options.
 ```go
 config := &guerrilla.AppConfig{
-  Servers: []*guerrilla.ServerConfig{...},
+  Servers: []guerrilla.ServerConfig{...},
   AllowedHosts: []string{...},
 }
 backend := &CustomBackend{...}
-app, err := guerrilla.New(config, backend)
+app, err := guerrilla.New(config, backend, mainlog)
 ```
 
 ## Start the app.
@@ -282,15 +313,47 @@ The Json parser is very strict on syntax. If there's a parse error and it
 doesn't give much clue, then test your syntax here:
 http://jsonlint.com/#
 
-Email Saving Backends
+Email Processing Backend
 =====================
 
-Backends provide for a modular way to save email and for the ability to
-extend this functionality. They can be swapped in or out via the config.
-Currently, the server comes with two example backends:
+The main job of a go-guerrilla backend is to validate recipients and deliver emails. The term
+"delivery" is often synonymous with saving email to secondary storage.
+
+The default backend implementation manages multiple workers. These workers are composed of
+smaller components called "Processors" which are chained using the config to perform a series of steps.
+Each processor implements a distinct feature or behaviour. For example, a processor may save
+the emails to a particular storage system such as MySQL, or it may add additional headers before
+passing the email to the next _processor_.
+
+To extend or add a new feature, one would write a new Processor, then add it to the config.
+There are a few default _processors_ to get you started.
+
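+A sketch of what a custom _processor_ could look like (the names here are illustrative,
+not part of the default set):
+
+```go
+import (
+	"github.com/flashmob/go-guerrilla/backends"
+	"github.com/flashmob/go-guerrilla/mail"
+)
+
+// Counter is a hypothetical processor that counts saved emails.
+var Counter = func() backends.Decorator {
+	count := 0
+	return func(p backends.Processor) backends.Processor {
+		return backends.ProcessWith(
+			func(e *mail.Envelope, task backends.SelectTask) (backends.Result, error) {
+				if task == backends.TaskSaveMail {
+					count++
+					backends.Log().Infof("saved %d emails so far", count)
+				}
+				// forward the call to the next processor in the chain
+				return p.Process(e, task)
+			})
+	}
+}
+```
+
+Register it with `d.AddProcessor("Counter", Counter)` and add `Counter` to the `save_process`
+config value.
+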
+### Documentation
+
+See the full documentation here:
+[About Backends: introduction, configuration, extending](https://github.com/flashmob/go-guerrilla/wiki/About-Backends:-introduction,-configuring-and-extending)
+
+### Included Processors
+
+| Processor | Description |
+|-----------|-------------|
+|Compressor|Sets a zlib compressor that other processors can use later|
+|Debugger|Logs the email envelope to help with testing|
+|Hasher|Processes each envelope to produce unique hashes to be used for ids later|
+|Header|Add a delivery header to the envelope|
+|HeadersParser|Parses MIME headers and also populates the Subject field of the envelope|
+|MySQL|Saves the emails to MySQL.|
+|Redis|Saves the email data to Redis.|
+|GuerrillaDbRedis|A 'monolithic' processor used at Guerrilla Mail; included as an example|
+
+### External Processors
+
+| Processor | Description |
+|-----------|-------------|
+|[MailDir](https://github.com/flashmob/maildir-processor)|Save emails to a maildir. [MailDiranasaurus](https://github.com/flashmob/maildiranasaurus) is an example project|
+|[FastCgi](https://github.com/flashmob/fastcgi-processor)|Deliver email directly to PHP-FPM or a similar FastCGI backend.|
 
-- dummy : used for testing purposes
-- guerrilla_db_redis: example uses MySQL and Redis to store email, used on Guerrilla Mail
+Have a processor that you would like to share? Submit a PR to add it to the list!
 
 Web Dashboard
 =============
@@ -300,8 +363,8 @@ An optional web-based dashboard is built into Go-Guerrilla. To use it, set the d
 Releases
 ========
 
-(Master branch - Release Candidate 1 for v1.6)
-Large refactoring of the code.
+(Master branch - Release Candidate 1 for v2.0)
+Large refactoring of the code. 
 - Introduced "backends": modular architecture for saving email
 - Issue: Use as a package in your own projects! https://github.com/flashmob/go-guerrilla/issues/20
 - Issue: Do not include dot-suffix in emails https://github.com/flashmob/go-guerrilla/issues/24

+ 209 - 0
api.go

@@ -0,0 +1,209 @@
+package guerrilla
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/flashmob/go-guerrilla/backends"
+	"github.com/flashmob/go-guerrilla/log"
+	"io/ioutil"
+	"time"
+)
+
+type Daemon struct {
+	Config  *AppConfig
+	Logger  log.Logger
+	Backend backends.Backend
+
+	g Guerrilla
+
+	configLoadTime time.Time
+}
+
+const defaultInterface = "127.0.0.1:2525"
+
+// AddProcessor adds a processor constructor to the backend.
+// name is the identifier to be used in the config. See backends docs for more info.
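+// Example (a sketch; myProcessor would be a backends.ProcessorConstructor you define):
+//   d.AddProcessor("MyProcessor", myProcessor)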
+func (d *Daemon) AddProcessor(name string, pc backends.ProcessorConstructor) {
+	backends.Svc.AddProcessor(name, pc)
+}
+
+// Start starts the daemon, initializing d.Config, d.Logger and d.Backend with defaults.
+// It can only be called once during the lifetime of the program.
+func (d *Daemon) Start() (err error) {
+	if d.g == nil {
+		if d.Config == nil {
+			d.Config = &AppConfig{}
+		}
+		if err = d.configureDefaults(); err != nil {
+			return err
+		}
+		if d.Logger == nil {
+			d.Logger, err = log.GetLogger(d.Config.LogFile)
+			if err != nil {
+				return err
+			}
+			d.Logger.SetLevel(d.Config.LogLevel)
+		}
+		if d.Backend == nil {
+			d.Backend, err = backends.New(d.Config.BackendConfig, d.Logger)
+			if err != nil {
+				return err
+			}
+		}
+		d.g, err = New(d.Config, d.Backend, d.Logger)
+		if err != nil {
+			return err
+		}
+	}
+	err = d.g.Start()
+	if err == nil {
+		if err := d.resetLogger(); err == nil {
+			d.Log().Infof("main log configured to %s", d.Config.LogFile)
+		}
+
+	}
+	return err
+}
+
+// Shutdown shuts down the daemon, including its servers and backend.
+// Do not call Start on it again; create a new Daemon instead.
+func (d *Daemon) Shutdown() {
+	if d.g != nil {
+		d.g.Shutdown()
+	}
+}
+
+// LoadConfig reads in the config from a JSON file.
+func (d *Daemon) LoadConfig(path string) (AppConfig, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return *d.Config, fmt.Errorf("Could not read config file: %s", err.Error())
+	}
+	d.Config = &AppConfig{}
+	if err := d.Config.Load(data); err != nil {
+		return *d.Config, err
+	}
+	d.configLoadTime = time.Now()
+	return *d.Config, nil
+}
+
+// SetConfig is the same as LoadConfig, except you can pass an AppConfig directly.
+// It does not emit any change events; instead, use ReloadConfig after the daemon has started.
+func (d *Daemon) SetConfig(c *AppConfig) error {
+	// Config.Load takes []byte so we need to serialize
+	data, err := json.Marshal(c)
+	if err != nil {
+		return err
+	}
+	// put the data into a fresh d.Config
+	d.Config = &AppConfig{}
+	if err := d.Config.Load(data); err != nil {
+		return err
+	}
+	d.configLoadTime = time.Now()
+	return nil
+}
+
+// ReloadConfig reloads the config using the passed-in AppConfig and emits config change events
+func (d *Daemon) ReloadConfig(c *AppConfig) error {
+	if d.Config == nil {
+		return errors.New("d.Config nil")
+	}
+	oldConfig := *d.Config
+	err := d.SetConfig(c)
+	if err != nil {
+		d.Log().WithError(err).Error("Error while reloading config")
+		return err
+	} else {
+		d.Log().Infof("Configuration was reloaded at %s", d.configLoadTime)
+		d.Config.EmitChangeEvents(&oldConfig, d.g)
+	}
+	return nil
+}
+
+// ReloadConfigFile reloads the config from a file and emits config change events
+func (d *Daemon) ReloadConfigFile(path string) error {
+	if d.Config == nil {
+		return errors.New("d.Config nil")
+	}
+	oldConfig := *d.Config
+	_, err := d.LoadConfig(path)
+	if err != nil {
+		d.Log().WithError(err).Error("Error while reloading config from file")
+		return err
+	} else {
+		d.Log().Infof("Configuration was reloaded at %s", d.configLoadTime)
+		d.Config.EmitChangeEvents(&oldConfig, d.g)
+	}
+	return nil
+}
+
+// ReopenLogs sends events to re-open all log files.
+// Typically, one would call this after rotating logs.
+func (d *Daemon) ReopenLogs() {
+	d.Config.EmitLogReopenEvents(d.g)
+}
+
+// Subscribe for subscribing to config change events
+func (d *Daemon) Subscribe(topic Event, fn interface{}) error {
+	return d.g.Subscribe(topic, fn)
+}
+
+// for publishing config change events
+func (d *Daemon) Publish(topic Event, args ...interface{}) {
+	d.g.Publish(topic, args...)
+}
+
+// for unsubscribing from config change events
+func (d *Daemon) Unsubscribe(topic Event, handler interface{}) error {
+	return d.g.Unsubscribe(topic, handler)
+}
+
+// Log returns a logger that implements our log.Logger interface.
+// The level is set to "info" by default.
+func (d *Daemon) Log() log.Logger {
+	if d.Logger != nil {
+		return d.Logger
+	}
+	out := log.OutputStderr.String()
+	if d.Config != nil && len(d.Config.LogFile) > 0 {
+		out = d.Config.LogFile
+	}
+	l, err := log.GetLogger(out)
+	if err == nil {
+		l.SetLevel("info")
+	}
+	return l
+
+}
+
+// set the default values for the servers and backend config options
+func (d *Daemon) configureDefaults() error {
+	err := d.Config.setDefaults()
+	if err != nil {
+		return err
+	}
+	if d.Backend == nil {
+		err = d.Config.setBackendDefaults()
+		if err != nil {
+			return err
+		}
+	}
+	return err
+}
+
+// resetLogger sets the logger to the one specified in the config.
+// This is because at the start, the daemon may be logging to stderr,
+// then attaches to the logs once the config is loaded.
+// This will propagate down to the servers / backend too.
+func (d *Daemon) resetLogger() error {
+	l, err := log.GetLogger(d.Config.LogFile)
+	if err != nil {
+		return err
+	}
+	d.Logger = l
+	d.g.SetLogger(d.Logger)
+	return nil
+}

+ 536 - 0
api_test.go

@@ -0,0 +1,536 @@
+package guerrilla
+
+import (
+	"bufio"
+	"fmt"
+	"github.com/flashmob/go-guerrilla/backends"
+	"github.com/flashmob/go-guerrilla/log"
+	"github.com/flashmob/go-guerrilla/mail"
+	"io/ioutil"
+	"net"
+	"os"
+	"strings"
+	"testing"
+	"time"
+)
+
+// Test Starting smtp without setting up logger / backend
+func TestSMTP(t *testing.T) {
+	go func() {
+		select {
+		case <-time.After(time.Second * 40):
+			//buf := make([]byte, 1<<16)
+			//stackSize := runtime.Stack(buf, true)
+			//fmt.Printf("%s\n", string(buf[0:stackSize]))
+			//panic("timeout")
+			t.Error("timeout")
+			return
+
+		}
+	}()
+
+	d := Daemon{}
+	err := d.Start()
+
+	if err != nil {
+		t.Error(err)
+	}
+	// it should set to stderr automatically
+	if d.Config.LogFile != log.OutputStderr.String() {
+		t.Error("smtp.config.LogFile is not", log.OutputStderr.String())
+	}
+
+	if len(d.Config.AllowedHosts) == 0 {
+		t.Error("smtp.config.AllowedHosts len should be 1, not 0", d.Config.AllowedHosts)
+	}
+
+	if d.Config.LogLevel != "debug" {
+		t.Error("smtp.config.LogLevel expected 'debug', it is", d.Config.LogLevel)
+	}
+	if len(d.Config.Servers) != 1 {
+		t.Error("len(smtp.config.Servers) should be 1, got", len(d.Config.Servers))
+	}
+	time.Sleep(time.Second * 2)
+	d.Shutdown()
+
+}
+
+// Suppressing log output
+func TestSMTPNoLog(t *testing.T) {
+
+	// configure a default server with no log output
+	cfg := &AppConfig{LogFile: log.OutputOff.String()}
+	d := Daemon{Config: cfg}
+
+	err := d.Start()
+	if err != nil {
+		t.Error(err)
+	}
+	time.Sleep(time.Second * 2)
+	d.Shutdown()
+}
+
+// our custom server
+func TestSMTPCustomServer(t *testing.T) {
+	cfg := &AppConfig{LogFile: log.OutputOff.String()}
+	sc := ServerConfig{
+		ListenInterface: "127.0.0.1:2526",
+		IsEnabled:       true,
+	}
+	cfg.Servers = append(cfg.Servers, sc)
+	d := Daemon{Config: cfg}
+
+	err := d.Start()
+	if err != nil {
+		t.Error("start error", err)
+	} else {
+		time.Sleep(time.Second * 2)
+		d.Shutdown()
+	}
+
+}
+
+// with a backend config
+func TestSMTPCustomBackend(t *testing.T) {
+	cfg := &AppConfig{LogFile: log.OutputOff.String()}
+	sc := ServerConfig{
+		ListenInterface: "127.0.0.1:2526",
+		IsEnabled:       true,
+	}
+	cfg.Servers = append(cfg.Servers, sc)
+	bcfg := backends.BackendConfig{
+		"save_workers_size":  3,
+		"save_process":       "HeadersParser|Header|Hasher|Debugger",
+		"log_received_mails": true,
+		"primary_mail_host":  "example.com",
+	}
+	cfg.BackendConfig = bcfg
+	d := Daemon{Config: cfg}
+
+	err := d.Start()
+	if err != nil {
+		t.Error("start error", err)
+	} else {
+		time.Sleep(time.Second * 2)
+		d.Shutdown()
+	}
+}
+
+// with a config from a json file
+func TestSMTPLoadFile(t *testing.T) {
+	json := `{
+    "log_file" : "./tests/testlog",
+    "log_level" : "debug",
+    "pid_file" : "tests/go-guerrilla.pid",
+    "allowed_hosts": ["spam4.me","grr.la"],
+    "backend_config" :
+        {
+            "log_received_mails" : true,
+            "save_process": "HeadersParser|Header|Hasher|Debugger",
+            "save_workers_size":  3
+        },
+    "servers" : [
+        {
+            "is_enabled" : true,
+            "host_name":"mail.guerrillamail.com",
+            "max_size": 100017,
+            "private_key_file":"config_test.go",
+            "public_key_file":"config_test.go",
+            "timeout":160,
+            "listen_interface":"127.0.0.1:2526",
+            "start_tls_on":false,
+            "tls_always_on":false,
+            "max_clients": 2
+        }
+    ]
+}
+
+	`
+	json2 := `{
+    "log_file" : "./tests/testlog2",
+    "log_level" : "debug",
+    "pid_file" : "tests/go-guerrilla2.pid",
+    "allowed_hosts": ["spam4.me","grr.la"],
+    "backend_config" :
+        {
+            "log_received_mails" : true,
+            "save_process": "HeadersParser|Header|Hasher|Debugger",
+            "save_workers_size":  3
+        },
+    "servers" : [
+        {
+            "is_enabled" : true,
+            "host_name":"mail.guerrillamail.com",
+            "max_size": 100017,
+            "private_key_file":"config_test.go",
+            "public_key_file":"config_test.go",
+            "timeout":160,
+            "listen_interface":"127.0.0.1:2526",
+            "start_tls_on":false,
+            "tls_always_on":false,
+            "max_clients": 2
+        }
+    ]
+}
+
+	`
+	err := ioutil.WriteFile("goguerrilla.conf.api", []byte(json), 0644)
+	if err != nil {
+		t.Error("could not write guerrilla.conf.api", err)
+		return
+	}
+
+	d := Daemon{}
+	_, err = d.LoadConfig("goguerrilla.conf.api")
+	if err != nil {
+		t.Error("ReadConfig error", err)
+		return
+	}
+
+	err = d.Start()
+	if err != nil {
+		t.Error("start error", err)
+		return
+	} else {
+		time.Sleep(time.Second * 2)
+		if d.Config.LogFile != "./tests/testlog" {
+			t.Error("d.Config.LogFile != \"./tests/testlog\"")
+		}
+
+		if d.Config.PidFile != "tests/go-guerrilla.pid" {
+			t.Error("d.Config.LogFile != tests/go-guerrilla.pid")
+		}
+
+		err := ioutil.WriteFile("goguerrilla.conf.api", []byte(json2), 0644)
+		if err != nil {
+			t.Error("could not write guerrilla.conf.api", err)
+			return
+		}
+
+		d.ReloadConfigFile("goguerrilla.conf.api")
+
+		if d.Config.LogFile != "./tests/testlog2" {
+			t.Error("d.Config.LogFile != \"./tests/testlog\"")
+		}
+
+		if d.Config.PidFile != "tests/go-guerrilla2.pid" {
+			t.Error("d.Config.LogFile != \"go-guerrilla.pid\"")
+		}
+
+		d.Shutdown()
+	}
+}
+
+func TestReopenLog(t *testing.T) {
+	os.Truncate("tests/testlog", 0)
+	cfg := &AppConfig{LogFile: "tests/testlog"}
+	sc := ServerConfig{
+		ListenInterface: "127.0.0.1:2526",
+		IsEnabled:       true,
+	}
+	cfg.Servers = append(cfg.Servers, sc)
+	d := Daemon{Config: cfg}
+
+	err := d.Start()
+	if err != nil {
+		t.Error("start error", err)
+	} else {
+		d.ReopenLogs()
+		time.Sleep(time.Second * 2)
+
+		d.Shutdown()
+	}
+
+	b, err := ioutil.ReadFile("tests/testlog")
+	if err != nil {
+		t.Error("could not read logfile")
+		return
+	}
+	if strings.Index(string(b), "re-opened log file") < 0 {
+		t.Error("Server log was not re-opened, expecting \"re-opened log file\"")
+	}
+	if strings.Index(string(b), "re-opened main log file") < 0 {
+		t.Error("Main log was not re-opened, expecting \"re-opened main log file\"")
+	}
+}
+
+func TestSetConfig(t *testing.T) {
+
+	os.Truncate("tests/testlog", 0)
+	cfg := &AppConfig{LogFile: "tests/testlog"}
+	sc := ServerConfig{
+		ListenInterface: "127.0.0.1:2526",
+		IsEnabled:       true,
+	}
+	cfg.Servers = append(cfg.Servers, sc)
+	d := Daemon{Config: cfg}
+
+	// lets add a new server
+	sc.ListenInterface = "127.0.0.1:2527"
+	cfg.Servers = append(cfg.Servers, sc)
+
+	err := d.SetConfig(cfg)
+	if err != nil {
+		t.Error("SetConfig returned an error:", err)
+		return
+	}
+
+	err = d.Start()
+	if err != nil {
+		t.Error("start error", err)
+	} else {
+
+		time.Sleep(time.Second * 2)
+
+		d.Shutdown()
+	}
+
+	b, err := ioutil.ReadFile("tests/testlog")
+	if err != nil {
+		t.Error("could not read logfile")
+		return
+	}
+	//fmt.Println(string(b))
+	// has 127.0.0.1:2527 started?
+	if strings.Index(string(b), "127.0.0.1:2527") < 0 {
+		t.Error("expecting 127.0.0.1:2527 to start")
+	}
+
+}
+
+func TestSetConfigError(t *testing.T) {
+
+	os.Truncate("tests/testlog", 0)
+	cfg := &AppConfig{LogFile: "tests/testlog"}
+	sc := ServerConfig{
+		ListenInterface: "127.0.0.1:2526",
+		IsEnabled:       true,
+	}
+	cfg.Servers = append(cfg.Servers, sc)
+	d := Daemon{Config: cfg}
+
+	// lets add a new server with bad TLS
+	sc.ListenInterface = "127.0.0.1:2527"
+	sc.StartTLSOn = true
+	sc.PublicKeyFile = "tests/testlog"  // totally wrong :->
+	sc.PrivateKeyFile = "tests/testlog" // totally wrong :->
+
+	cfg.Servers = append(cfg.Servers, sc)
+
+	err := d.SetConfig(cfg)
+	if err == nil {
+		t.Error("SetConfig should have returned an error complaining about bad TLS settings")
+		return
+	}
+}
+
+var funkyLogger = func() backends.Decorator {
+
+	backends.Svc.AddInitializer(
+		backends.InitializeWith(
+			func(backendConfig backends.BackendConfig) error {
+				backends.Log().Info("Funky logger is up & down to funk!")
+				return nil
+			}),
+	)
+
+	backends.Svc.AddShutdowner(
+		backends.ShutdownWith(
+			func() error {
+				backends.Log().Info("The funk has been stopped!")
+				return nil
+			}),
+	)
+
+	return func(p backends.Processor) backends.Processor {
+		return backends.ProcessWith(
+			func(e *mail.Envelope, task backends.SelectTask) (backends.Result, error) {
+				if task == backends.TaskValidateRcpt {
+					// validate the last recipient appended to e.Rcpt
+					backends.Log().Infof(
+						"another funky recipient [%s]",
+						e.RcptTo[len(e.RcptTo)-1])
+					// if valid then forward call to the next processor in the chain
+					return p.Process(e, task)
+					// if invalid, return a backend result
+					//return backends.NewResult(response.Canned.FailRcptCmd), nil
+				} else if task == backends.TaskSaveMail {
+					backends.Log().Info("Another funky email!")
+				}
+				return p.Process(e, task)
+			})
+	}
+}
+
+// How about a custom processor?
+func TestSetAddProcessor(t *testing.T) {
+	os.Truncate("tests/testlog", 0)
+	cfg := &AppConfig{
+		LogFile:      "tests/testlog",
+		AllowedHosts: []string{"grr.la"},
+		BackendConfig: backends.BackendConfig{
+			"save_process":     "HeadersParser|Debugger|FunkyLogger",
+			"validate_process": "FunkyLogger",
+		},
+	}
+	d := Daemon{Config: cfg}
+	d.AddProcessor("FunkyLogger", funkyLogger)
+
+	d.Start()
+	// lets have a talk with the server
+	talkToServer("127.0.0.1:2525")
+
+	d.Shutdown()
+
+	b, err := ioutil.ReadFile("tests/testlog")
+	if err != nil {
+		t.Error("could not read logfile")
+		return
+	}
+	// lets check for fingerprints
+	if strings.Index(string(b), "another funky recipient") < 0 {
+		t.Error("did not log: another funky recipient")
+	}
+
+	if strings.Index(string(b), "Another funky email!") < 0 {
+		t.Error("Did not log: Another funky email!")
+	}
+
+	if strings.Index(string(b), "Funky logger is up & down to funk") < 0 {
+		t.Error("Did not log: Funky logger is up & down to funk")
+	}
+	if strings.Index(string(b), "The funk has been stopped!") < 0 {
+		t.Error("Did not log:The funk has been stopped!")
+	}
+
+}
+
+func talkToServer(address string) {
+
+	conn, err := net.Dial("tcp", address)
+	if err != nil {
+
+		return
+	}
+	in := bufio.NewReader(conn)
+	str, err := in.ReadString('\n')
+	//	fmt.Println(str)
+	fmt.Fprint(conn, "HELO maildiranasaurustester\r\n")
+	str, err = in.ReadString('\n')
+	//	fmt.Println(str)
+	fmt.Fprint(conn, "MAIL FROM:<[email protected]>\r\n")
+	str, err = in.ReadString('\n')
+	//	fmt.Println(str)
+	fmt.Fprint(conn, "RCPT TO:<[email protected]>\r\n")
+	str, err = in.ReadString('\n')
+	//	fmt.Println(str)
+	fmt.Fprint(conn, "DATA\r\n")
+	str, err = in.ReadString('\n')
+	//	fmt.Println(str)
+	fmt.Fprint(conn, "Subject: Test subject\r\n")
+	fmt.Fprint(conn, "\r\n")
+	fmt.Fprint(conn, "An email body\r\n")
+	fmt.Fprint(conn, ".\r\n")
+	str, err = in.ReadString('\n')
+	//	fmt.Println(str)
+	_ = str
+}
+
+// Test hot config reload
+// Here we forgot to add FunkyLogger so backend will fail to init
+
+func TestReloadConfig(t *testing.T) {
+	os.Truncate("tests/testlog", 0)
+	d := Daemon{}
+	d.Start()
+
+	cfg := &AppConfig{
+		LogFile:      "tests/testlog",
+		AllowedHosts: []string{"grr.la"},
+		BackendConfig: backends.BackendConfig{
+			"save_process":     "HeadersParser|Debugger|FunkyLogger",
+			"validate_process": "FunkyLogger",
+		},
+	}
+	// Look mom, reloading the config without shutting down!
+	d.ReloadConfig(cfg)
+
+	d.Shutdown()
+}
+
+func TestPubSubAPI(t *testing.T) {
+
+	os.Truncate("tests/testlog", 0)
+
+	d := Daemon{Config: &AppConfig{LogFile: "tests/testlog"}}
+	d.Start()
+
+	// new config
+	cfg := &AppConfig{
+		PidFile:      "tests/pidfilex.pid",
+		LogFile:      "tests/testlog",
+		AllowedHosts: []string{"grr.la"},
+		BackendConfig: backends.BackendConfig{
+			"save_process":     "HeadersParser|Debugger|FunkyLogger",
+			"validate_process": "FunkyLogger",
+		},
+	}
+
+	var i = 0
+	pidEvHandler := func(c *AppConfig) {
+		i++
+		if i > 1 {
+			t.Error("number > 1, it means d.Unsubscribe didn't work")
+		}
+		d.Logger.Info("number", i)
+	}
+	d.Subscribe(EventConfigPidFile, pidEvHandler)
+
+	d.ReloadConfig(cfg)
+
+	d.Unsubscribe(EventConfigPidFile, pidEvHandler)
+	cfg.PidFile = "tests/pidfile2.pid"
+	d.Publish(EventConfigPidFile, cfg)
+	d.ReloadConfig(cfg)
+
+	b, err := ioutil.ReadFile("tests/testlog")
+	if err != nil {
+		t.Error("could not read logfile")
+		return
+	}
+	// lets interrogate the log
+	if strings.Index(string(b), "number1") < 0 {
+		t.Error("it looks like d.ReloadConfig(cfg) did not fire EventConfigPidFile, pidEvHandler not called")
+	}
+
+}
+
+func TestAPILog(t *testing.T) {
+	os.Truncate("tests/testlog", 0)
+	d := Daemon{}
+	l := d.Log()
+	l.Info("logtest1") // to stderr
+	if l.GetLevel() != "info" {
+		t.Error("Log level does not eq info, it is ", l.GetLevel())
+	}
+	d.Logger = nil
+	d.Config = &AppConfig{LogFile: "tests/testlog"}
+	l = d.Log()
+	l.Info("logtest1") // to tests/testlog
+
+	//
+	l = d.Log()
+	if l.GetLogDest() != "tests/testlog" {
+		t.Error("log dest is not tests/testlog, it was ", l.GetLogDest())
+	}
+
+	b, err := ioutil.ReadFile("tests/testlog")
+	if err != nil {
+		t.Error("could not read logfile")
+		return
+	}
+	// lets interrogate the log
+	if strings.Index(string(b), "logtest1") < 0 {
+		t.Error("logtest1 was not found in the log, it should have been in tests/testlog")
+	}
+}

+ 0 - 157
backends/abstract.go

@@ -1,157 +0,0 @@
-package backends
-
-import (
-	"errors"
-	"fmt"
-	"github.com/flashmob/go-guerrilla/envelope"
-	"reflect"
-	"strings"
-)
-
-type AbstractBackend struct {
-	config abstractConfig
-	extend Backend
-}
-
-type abstractConfig struct {
-	LogReceivedMails bool `json:"log_received_mails"`
-}
-
-// Your backend should implement this method and set b.config field with a custom config struct
-// Therefore, your implementation would have your own custom config type instead of dummyConfig
-func (b *AbstractBackend) loadConfig(backendConfig BackendConfig) (err error) {
-	// Load the backend config for the backend. It has already been unmarshalled
-	// from the main config file 'backend' config "backend_config"
-	// Now we need to convert each type and copy into the dummyConfig struct
-	configType := baseConfig(&abstractConfig{})
-	bcfg, err := b.extractConfig(backendConfig, configType)
-	if err != nil {
-		return err
-	}
-	m := bcfg.(*abstractConfig)
-	b.config = *m
-	return nil
-}
-
-func (b *AbstractBackend) Initialize(config BackendConfig) error {
-	if b.extend != nil {
-		return b.extend.loadConfig(config)
-	}
-	err := b.loadConfig(config)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (b *AbstractBackend) Shutdown() error {
-	if b.extend != nil {
-		return b.extend.Shutdown()
-	}
-	return nil
-}
-
-func (b *AbstractBackend) Process(mail *envelope.Envelope) BackendResult {
-	if b.extend != nil {
-		return b.extend.Process(mail)
-	}
-	mail.ParseHeaders()
-
-	if b.config.LogReceivedMails {
-		mainlog.Infof("Mail from: %s / to: %v", mail.MailFrom.String(), mail.RcptTo)
-		mainlog.Info("Headers are: %s", mail.Header)
-
-	}
-	return NewBackendResult("250 OK")
-}
-
-func (b *AbstractBackend) saveMailWorker(saveMailChan chan *savePayload) {
-	if b.extend != nil {
-		b.extend.saveMailWorker(saveMailChan)
-		return
-	}
-	defer func() {
-		if r := recover(); r != nil {
-			// recover form closed channel
-			fmt.Println("Recovered in f", r)
-		}
-		// close any connections / files
-		// ...
-
-	}()
-	for {
-		payload := <-saveMailChan
-		if payload == nil {
-			mainlog.Debug("No more saveMailChan payload")
-			return
-		}
-		// process the email here
-		result := b.Process(payload.mail)
-		// if all good
-		if result.Code() < 300 {
-			payload.savedNotify <- &saveStatus{nil, "s0m3l337Ha5hva1u3LOL"}
-		} else {
-			payload.savedNotify <- &saveStatus{errors.New(result.String()), "s0m3l337Ha5hva1u3LOL"}
-		}
-
-	}
-}
-
-func (b *AbstractBackend) getNumberOfWorkers() int {
-	if b.extend != nil {
-		return b.extend.getNumberOfWorkers()
-	}
-	return 1
-}
-
-func (b *AbstractBackend) testSettings() error {
-	if b.extend != nil {
-		return b.extend.testSettings()
-	}
-	return nil
-}
-
-// Load the backend config for the backend. It has already been unmarshalled
-// from the main config file 'backend' config "backend_config"
-// Now we need to convert each type and copy into the guerrillaDBAndRedisConfig struct
-// The reason why using reflection is because we'll get a nice error message if the field is missing
-// the alternative solution would be to json.Marshal() and json.Unmarshal() however that will not give us any
-// error messages
-func (h *AbstractBackend) extractConfig(configData BackendConfig, configType baseConfig) (interface{}, error) {
-	// Use reflection so that we can provide a nice error message
-	s := reflect.ValueOf(configType).Elem() // so that we can set the values
-	m := reflect.ValueOf(configType).Elem()
-	t := reflect.TypeOf(configType).Elem()
-	typeOfT := s.Type()
-
-	for i := 0; i < m.NumField(); i++ {
-		f := s.Field(i)
-		// read the tags of the config struct
-		field_name := t.Field(i).Tag.Get("json")
-		if len(field_name) > 0 {
-			// parse the tag to
-			// get the field name from struct tag
-			split := strings.Split(field_name, ",")
-			field_name = split[0]
-		} else {
-			// could have no tag
-			// so use the reflected field name
-			field_name = typeOfT.Field(i).Name
-		}
-		if f.Type().Name() == "int" {
-			if intVal, converted := configData[field_name].(float64); converted {
-				s.Field(i).SetInt(int64(intVal))
-			} else {
-				return configType, convertError("property missing/invalid: '" + field_name + "' of expected type: " + f.Type().Name())
-			}
-		}
-		if f.Type().Name() == "string" {
-			if stringVal, converted := configData[field_name].(string); converted {
-				s.Field(i).SetString(stringVal)
-			} else {
-				return configType, convertError("missing/invalid: '" + field_name + "' of type: " + f.Type().Name())
-			}
-		}
-	}
-	return configType, nil
-}

+ 207 - 137
backends/backend.go

@@ -1,77 +1,80 @@
 package backends
 
 import (
-	"errors"
 	"fmt"
+	"github.com/flashmob/go-guerrilla/log"
+	"github.com/flashmob/go-guerrilla/mail"
+	"reflect"
 	"strconv"
 	"strings"
 	"sync"
-	"time"
+	"sync/atomic"
+)
 
-	"github.com/flashmob/go-guerrilla/envelope"
-	"github.com/flashmob/go-guerrilla/log"
-	"github.com/flashmob/go-guerrilla/response"
+var (
+	Svc *service
+
+	// Store the constructor for making a new processor decorator.
+	processors map[string]ProcessorConstructor
+
+	b Backend
 )
 
-var mainlog log.Logger
+func init() {
+	Svc = &service{}
+	processors = make(map[string]ProcessorConstructor)
+}
+
+type ProcessorConstructor func() Decorator
 
 // Backends process received mail. Depending on the implementation, they can store mail in the database,
 // write to a file, check for spam, re-transmit to another server, etc.
 // Must return an SMTP message (i.e. "250 OK") and a boolean indicating
 // whether the message was processed successfully.
 type Backend interface {
-	// Public methods
-	Process(*envelope.Envelope) BackendResult
+	// Process processes then saves the mail envelope
+	Process(*mail.Envelope) Result
+	// ValidateRcpt validates the last recipient that was pushed to the mail envelope
+	ValidateRcpt(e *mail.Envelope) RcptError
+	// Initializes the backend, eg. creates folders, sets-up database connections
 	Initialize(BackendConfig) error
+	// Initializes the backend after it was Shutdown()
+	Reinitialize() error
+	// Shutdown frees / closes anything created during initializations
 	Shutdown() error
-
-	// start save mail worker(s)
-	saveMailWorker(chan *savePayload)
-	// get the number of workers that will be stared
-	getNumberOfWorkers() int
-	// test database settings, permissions, correct paths, etc, before starting workers
-	testSettings() error
-	// parse the configuration files
-	loadConfig(BackendConfig) error
+	// Start starts a backend that has been initialized
+	Start() error
 }
 
 type BackendConfig map[string]interface{}
 
-var backends = map[string]Backend{}
+// All config structs extend from this
+type BaseConfig interface{}
 
-type baseConfig interface{}
-
-type saveStatus struct {
-	err  error
-	hash string
+type notifyMsg struct {
+	err      error
+	queuedID string
 }
 
-type savePayload struct {
-	mail        *envelope.Envelope
-	from        *envelope.EmailAddress
-	recipient   *envelope.EmailAddress
-	savedNotify chan *saveStatus
-}
-
-// BackendResult represents a response to an SMTP client after receiving DATA.
+// Result represents a response to an SMTP client after receiving DATA.
 // The String method should return an SMTP message ready to send back to the
 // The String method should return an SMTP message ready to send back to the
 // client, for example `250 OK: Message received`.
+type Result interface {
 	fmt.Stringer
 	fmt.Stringer
 	// Code should return the SMTP code associated with this response, ie. `250`
 	Code() int
 }
 
 // Internal implementation of BackendResult for use by backend implementations.
+type result string
 
 
+func (br result) String() string {
 	return string(br)
 	return string(br)
 }
 
 // Parses the SMTP code from the first 3 characters of the SMTP message.
 // Returns 554 if code cannot be parsed.
+func (br result) Code() int {
 	trimmed := strings.TrimSpace(string(br))
 	trimmed := strings.TrimSpace(string(br))
 	if len(trimmed) < 3 {
 		return 554
@@ -83,134 +86,201 @@ func (br backendResult) Code() int {
 	return code
 }
 
-func NewBackendResult(message string) BackendResult {
-	return backendResult(message)
+func NewResult(message string) Result {
+	return result(message)
 }
 
-// A backend gateway is a proxy that implements the Backend interface.
-// It is used to start multiple goroutine workers for saving mail, and then distribute email saving to the workers
-// via a channel. Shutting down via Shutdown() will stop all workers.
-// The rest of this program always talks to the backend via this gateway.
-type BackendGateway struct {
-	AbstractBackend
-	saveMailChan chan *savePayload
-	// waits for backend workers to start/stop
-	wg sync.WaitGroup
-	b  Backend
-	// controls access to state
-	stateGuard sync.Mutex
-	State      backendState
-	config     BackendConfig
+type processorInitializer interface {
+	Initialize(backendConfig BackendConfig) error
 }
 
-// possible values for state
-const (
-	BackendStateRunning = iota
-	BackendStateShuttered
-	BackendStateError
-)
+type processorShutdowner interface {
+	Shutdown() error
+}
 
-type backendState int
+type InitializeWith func(backendConfig BackendConfig) error
+type ShutdownWith func() error
 
-func (s backendState) String() string {
-	return strconv.Itoa(int(s))
+// Satisfy ProcessorInitializer interface
+// So we can now pass an anonymous function that implements ProcessorInitializer
+func (i InitializeWith) Initialize(backendConfig BackendConfig) error {
+	// delegate to the anonymous function
+	return i(backendConfig)
 }
 
-// New retrieve a backend specified by the backendName, and initialize it using
-// backendConfig
-func New(backendName string, backendConfig BackendConfig, l log.Logger) (Backend, error) {
-	backend, found := backends[backendName]
-	mainlog = l
-	if !found {
-		return nil, fmt.Errorf("backend %q not found", backendName)
+// satisfy ProcessorShutdowner interface, same concept as InitializeWith type
+func (s ShutdownWith) Shutdown() error {
+	// delegate
+	return s()
+}
+
+type Errors []error
+
+// implement the Error interface
+func (e Errors) Error() string {
+	if len(e) == 1 {
+		return e[0].Error()
 	}
-	gateway := &BackendGateway{b: backend, config: backendConfig}
-	err := gateway.Initialize(backendConfig)
-	if err != nil {
-		return nil, fmt.Errorf("error while initializing the backend: %s", err)
+	// multiple errors
+	msg := ""
+	for _, err := range e {
+		msg += "\n" + err.Error()
 	}
-	gateway.State = BackendStateRunning
-	return gateway, nil
+	return msg
 }
 
-// Process distributes an envelope to one of the backend workers
-func (gw *BackendGateway) Process(e *envelope.Envelope) BackendResult {
-	if gw.State != BackendStateRunning {
-		return NewBackendResult(response.Canned.FailBackendNotRunning + gw.State.String())
-	}
+func convertError(name string) error {
+	return fmt.Errorf("failed to load backend config (%s)", name)
+}
 
-	to := e.RcptTo
-	from := e.MailFrom
-
-	// place on the channel so that one of the save mail workers can pick it up
-	// TODO: support multiple recipients
-	savedNotify := make(chan *saveStatus)
-	gw.saveMailChan <- &savePayload{e, &from, &to[0], savedNotify}
-	// wait for the save to complete
-	// or timeout
-	select {
-	case status := <-savedNotify:
-		if status.err != nil {
-			return NewBackendResult(response.Canned.FailBackendTransaction + status.err.Error())
-		}
-		return NewBackendResult(response.Canned.SuccessMessageQueued + status.hash)
+type service struct {
+	initializers []processorInitializer
+	shutdowners  []processorShutdowner
+	sync.Mutex
+	mainlog atomic.Value
+}
 
-	case <-time.After(time.Second * 30):
-		mainlog.Infof("Backend has timed out")
-		return NewBackendResult(response.Canned.FailBackendTimeout)
+// Log loads the log.Logger in an atomic operation. Returns a stderr logger if not able to load
+func Log() log.Logger {
+	if v, ok := Svc.mainlog.Load().(log.Logger); ok {
+		return v
 	}
+	l, _ := log.GetLogger(log.OutputStderr.String())
+	return l
+}
+
+func (s *service) SetMainlog(l log.Logger) {
+	s.mainlog.Store(l)
+}
+
+// AddInitializer adds a function that implements processorInitializer to be called when initializing
+func (s *service) AddInitializer(i processorInitializer) {
+	s.Lock()
+	defer s.Unlock()
+	s.initializers = append(s.initializers, i)
+}
+
+// AddShutdowner adds a function that implements ProcessorShutdowner to be called when shutting down
+func (s *service) AddShutdowner(sh processorShutdowner) {
+	s.Lock()
+	defer s.Unlock()
+	s.shutdowners = append(s.shutdowners, sh)
 }
-func (gw *BackendGateway) Shutdown() error {
-	gw.stateGuard.Lock()
-	defer gw.stateGuard.Unlock()
-	if gw.State != BackendStateShuttered {
-		err := gw.b.Shutdown()
-		if err == nil {
-			close(gw.saveMailChan) // workers will stop
-			gw.wg.Wait()
-			gw.State = BackendStateShuttered
+
+// reset clears the initializers and Shutdowners
+func (s *service) reset() {
+	s.shutdowners = make([]processorShutdowner, 0)
+	s.initializers = make([]processorInitializer, 0)
+}
+
+// Initialize initializes all the processors one-by-one and returns any errors.
+// Subsequent calls to Initialize will not call the initializer again unless it failed on the previous call
+// so Initialize may be called again to retry after getting errors
+func (s *service) initialize(backend BackendConfig) Errors {
+	s.Lock()
+	defer s.Unlock()
+	var errors Errors
+	failed := make([]processorInitializer, 0)
+	for i := range s.initializers {
+		if err := s.initializers[i].Initialize(backend); err != nil {
+			errors = append(errors, err)
+			failed = append(failed, s.initializers[i])
 		}
-		return err
 	}
-	return nil
+	// keep only the failed initializers
+	s.initializers = failed
+	return errors
 }
 
-// Reinitialize starts up a backend gateway that was shutdown before
-func (gw *BackendGateway) Reinitialize() error {
-	if gw.State != BackendStateShuttered {
-		return errors.New("backend must be in BackendStateshuttered state to Reinitialize")
+// Shutdown shuts down all the processors by calling their shutdowners (if any)
+// Subsequent calls to Shutdown will not call the shutdowners again unless it failed on the previous call
+// so Shutdown may be called again to retry after getting errors
+func (s *service) shutdown() Errors {
+	s.Lock()
+	defer s.Unlock()
+	var errors Errors
+	failed := make([]processorShutdowner, 0)
+	for i := range s.shutdowners {
+		if err := s.shutdowners[i].Shutdown(); err != nil {
+			errors = append(errors, err)
+			failed = append(failed, s.shutdowners[i])
+		}
 	}
-	err := gw.Initialize(gw.config)
-	if err != nil {
-		return fmt.Errorf("error while initializing the backend: %s", err)
+	s.shutdowners = failed
+	return errors
+}
+
+// AddProcessor adds a new processor, which becomes available to the backend_config.save_process option
+// and also the backend_config.validate_process option
+// Use to add your own custom processor when using backends as a package, or after importing an external
+// processor.
+func (s *service) AddProcessor(name string, p ProcessorConstructor) {
+	// wrap in a constructor since we want to defer calling it
+	var c ProcessorConstructor
+	c = func() Decorator {
+		return p()
 	}
-	gw.State = BackendStateRunning
-	return err
+	// add to our processors list
+	processors[strings.ToLower(name)] = c
 }
 
-func (gw *BackendGateway) Initialize(cfg BackendConfig) error {
-	err := gw.b.Initialize(cfg)
-	if err == nil {
-		workersSize := gw.b.getNumberOfWorkers()
-		if workersSize < 1 {
-			gw.State = BackendStateError
-			return errors.New("Must have at least 1 worker")
+// ExtractConfig loads the backend config. It has already been unmarshalled.
+// configData contains data from the main config file's "backend_config" value.
+// configType is a Processor's specific config value.
+// The reason for using reflection is that we'll get a nice error message if a field is missing;
+// the alternative solution, json.Marshal() and json.Unmarshal(), would not give us any
+// error messages.
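+// Example (a sketch; myConfig is hypothetical):
+//   type myConfig struct { ApiKey string `json:"api_key"` }
+//   if cfg, err := Svc.ExtractConfig(backendConfig, &myConfig{}); err == nil {
+//       key := cfg.(*myConfig).ApiKey
+//   }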
+func (s *service) ExtractConfig(configData BackendConfig, configType BaseConfig) (interface{}, error) {
+	// Use reflection so that we can provide a nice error message
+	v := reflect.ValueOf(configType).Elem() // so that we can set the values
+	//m := reflect.ValueOf(configType).Elem()
+	t := reflect.TypeOf(configType).Elem()
+	typeOfT := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := v.Field(i)
+		// read the tags of the config struct
+		field_name := t.Field(i).Tag.Get("json")
+		omitempty := false
+		if len(field_name) > 0 {
+			// parse the tag to
+			// get the field name from struct tag
+			split := strings.Split(field_name, ",")
+			field_name = split[0]
+			if len(split) > 1 {
+				if split[1] == "omitempty" {
+					omitempty = true
+				}
+			}
+		} else {
+			// could have no tag
+			// so use the reflected field name
+			field_name = typeOfT.Field(i).Name
+		}
+		if f.Type().Name() == "int" {
+			// in json, there is no int, only floats...
+			if intVal, converted := configData[field_name].(float64); converted {
+				v.Field(i).SetInt(int64(intVal))
+			} else if intVal, converted := configData[field_name].(int); converted {
+				v.Field(i).SetInt(int64(intVal))
+			} else if !omitempty {
+				return configType, convertError("property missing/invalid: '" + field_name + "' of expected type: " + f.Type().Name())
+			}
 		}
-		if err := gw.b.testSettings(); err != nil {
-			gw.State = BackendStateError
-			return err
+		if f.Type().Name() == "string" {
+			if stringVal, converted := configData[field_name].(string); converted {
+				v.Field(i).SetString(stringVal)
+			} else if !omitempty {
+				return configType, convertError("missing/invalid: '" + field_name + "' of type: " + f.Type().Name())
+			}
 		}
-		gw.saveMailChan = make(chan *savePayload, workersSize)
-		// start our savemail workers
-		gw.wg.Add(workersSize)
-		for i := 0; i < workersSize; i++ {
-			go func() {
-				gw.b.saveMailWorker(gw.saveMailChan)
-				gw.wg.Done()
-			}()
+		if f.Type().Name() == "bool" {
+			if boolVal, converted := configData[field_name].(bool); converted {
+				v.Field(i).SetBool(boolVal)
+			} else if !omitempty {
+				return configType, convertError("missing/invalid: '" + field_name + "' of type: " + f.Type().Name())
+			}
 		}
-	} else {
-		gw.State = BackendStateError
 	}
-	return err
+	return configType, nil
 }

+ 13 - 0
backends/decorate.go

@@ -0,0 +1,13 @@
+package backends
+
+// We define what a decorator to our processor will look like
+type Decorator func(Processor) Processor
+
+// Decorate will decorate a processor with a slice of passed decorators
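+// Note: the last decorator in ds becomes the outermost wrapper, so its code runs first;
+// e.g. Decorate(p, d1, d2) yields d2(d1(p)) (a sketch of the ordering).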
+func Decorate(c Processor, ds ...Decorator) Processor {
+	decorated := c
+	for _, decorate := range ds {
+		decorated = decorate(decorated)
+	}
+	return decorated
+}

+ 0 - 37
backends/dummy.go

@@ -1,37 +0,0 @@
-package backends
-
-func init() {
-	// decorator pattern
-	backends["dummy"] = &AbstractBackend{
-		extend: &DummyBackend{},
-	}
-}
-
-// custom configuration we will parse from the json
-// see guerrillaDBAndRedisConfig struct for a more complete example
-type dummyConfig struct {
-	LogReceivedMails bool `json:"log_received_mails"`
-}
-
-// putting all the paces we need together
-type DummyBackend struct {
-	config dummyConfig
-	// embed functions form AbstractBackend so that DummyBackend satisfies the Backend interface
-	AbstractBackend
-}
-
-// Backends should implement this method and set b.config field with a custom config struct
-// Therefore, your implementation would have a custom config type instead of dummyConfig
-func (b *DummyBackend) loadConfig(backendConfig BackendConfig) (err error) {
-	// Load the backend config for the backend. It has already been unmarshalled
-	// from the main config file 'backend' config "backend_config"
-	// Now we need to convert each type and copy into the dummyConfig struct
-	configType := baseConfig(&dummyConfig{})
-	bcfg, err := b.extractConfig(backendConfig, configType)
-	if err != nil {
-		return err
-	}
-	m := bcfg.(*dummyConfig)
-	b.config = *m
-	return nil
-}

+ 397 - 0
backends/gateway.go

@@ -0,0 +1,397 @@
+package backends
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/flashmob/go-guerrilla/log"
+	"github.com/flashmob/go-guerrilla/mail"
+	"github.com/flashmob/go-guerrilla/response"
+	"runtime/debug"
+	"strings"
+)
+
+var ErrProcessorNotFound error
+
+// A backend gateway is a proxy that implements the Backend interface.
+// It is used to start multiple goroutine workers for saving mail, and then distribute email saving to the workers
+// via a channel. Shutting down via Shutdown() will stop all workers.
+// The rest of this program always talks to the backend via this gateway.
+type BackendGateway struct {
+	// channel for distributing envelopes to workers
+	conveyor chan *workerMsg
+
+	// waits for backend workers to start/stop
+	wg           sync.WaitGroup
+	workStoppers []chan bool
+	processors   []Processor
+	validators   []Processor
+
+	// controls access to state
+	sync.Mutex
+	State    backendState
+	config   BackendConfig
+	gwConfig *GatewayConfig
+}
+
+type GatewayConfig struct {
+	// WorkersSize controls how many concurrent workers to start. Defaults to 1
+	WorkersSize int `json:"save_workers_size,omitempty"`
+	// SaveProcess controls which processors to chain in a stack for saving email tasks
+	SaveProcess string `json:"save_process,omitempty"`
+	// ValidateProcess is like ProcessorStack, but for recipient validation tasks
+	ValidateProcess string `json:"validate_process,omitempty"`
+	// TimeoutSave is the number of seconds before timeout when saving an email
+	TimeoutSave int `json:"gw_save_timeout,omitempty"`
+	// TimeoutValidateRcpt is how many seconds before timeout when validating a recipient
+	TimeoutValidateRcpt int `json:"gw_val_rcpt_timeout,omitempty"`
+}
+
+// workerMsg is what gets placed on the BackendGateway.conveyor channel
+type workerMsg struct {
+	// The email data
+	e *mail.Envelope
+	// notifyMe is used to notify the gateway of workers finishing their processing
+	notifyMe chan *notifyMsg
+	// select the task type
+	task SelectTask
+}
+
+type backendState int
+
+// possible values for state
+const (
+	BackendStateNew backendState = iota
+	BackendStateRunning
+	BackendStateShuttered
+	BackendStateError
+	BackendStateInitialized
+
+	// default timeout for saving email, if 'gw_save_timeout' not present in config
+	saveTimeout = time.Second * 30
+	// default timeout for validating rcpt to, if 'gw_val_rcpt_timeout' not present in config
+	validateRcptTimeout = time.Second * 5
+	defaultProcessor    = "Debugger"
+)
+
+func (s backendState) String() string {
+	switch s {
+	case BackendStateNew:
+		return "NewState"
+	case BackendStateRunning:
+		return "RunningState"
+	case BackendStateShuttered:
+		return "ShutteredState"
+	case BackendStateError:
+		return "ErrorState"
+	case BackendStateInitialized:
+		return "InitializedState"
+	}
+	return strconv.Itoa(int(s))
+}
+
+// New makes a new default BackendGateway backend, and initializes it using
+// backendConfig and stores the logger
+func New(backendConfig BackendConfig, l log.Logger) (Backend, error) {
+	Svc.SetMainlog(l)
+	gateway := &BackendGateway{}
+	err := gateway.Initialize(backendConfig)
+	if err != nil {
+		return nil, fmt.Errorf("error while initializing the backend: %s", err)
+	}
+	// keep the config known to be good.
+	gateway.config = backendConfig
+
+	b = Backend(gateway)
+	return b, nil
+}
+
+// Process distributes an envelope to one of the backend workers
+func (gw *BackendGateway) Process(e *mail.Envelope) Result {
+	if gw.State != BackendStateRunning {
+		return NewResult(response.Canned.FailBackendNotRunning + gw.State.String())
+	}
+	// place on the channel so that one of the save mail workers can pick it up
+	savedNotify := make(chan *notifyMsg)
+	gw.conveyor <- &workerMsg{e, savedNotify, TaskSaveMail}
+	// wait for the save to complete
+	// or timeout
+	select {
+	case status := <-savedNotify:
+		if status.err != nil {
+			return NewResult(response.Canned.FailBackendTransaction + status.err.Error())
+		}
+		return NewResult(response.Canned.SuccessMessageQueued + status.queuedID)
+
+	case <-time.After(gw.saveTimeout()):
+		Log().Error("Backend has timed out while saving email")
+		return NewResult(response.Canned.FailBackendTimeout)
+	}
+}
+
+// ValidateRcpt asks one of the workers to validate the recipient
+// Only the last recipient appended to e.RcptTo will be validated.
+func (gw *BackendGateway) ValidateRcpt(e *mail.Envelope) RcptError {
+	if gw.State != BackendStateRunning {
+		return StorageNotAvailable
+	}
+	// place on the channel so that one of the save mail workers can pick it up
+	notify := make(chan *notifyMsg)
+	gw.conveyor <- &workerMsg{e, notify, TaskValidateRcpt}
+	// wait for the validation to complete
+	// or timeout
+	select {
+	case status := <-notify:
+		if status.err != nil {
+			return status.err
+		}
+		return nil
+
+	case <-time.After(gw.validateRcptTimeout()):
+		Log().Error("Backend has timed out while validating rcpt")
+		return StorageTimeout
+	}
+}
+
+// Shutdown shuts down the backend and leaves it in BackendStateShuttered state
+func (gw *BackendGateway) Shutdown() error {
+	gw.Lock()
+	defer gw.Unlock()
+	if gw.State != BackendStateShuttered {
+		// send a signal to all workers
+		gw.stopWorkers()
+		// wait for workers to stop
+		gw.wg.Wait()
+		// call shutdown on all processor shutdowners
+		if err := Svc.shutdown(); err != nil {
+			return err
+		}
+		gw.State = BackendStateShuttered
+	}
+	return nil
+}
+
+// Reinitialize initializes the gateway with the existing config after it was shutdown
+func (gw *BackendGateway) Reinitialize() error {
+	if gw.State != BackendStateShuttered {
+		return errors.New("backend must be in BackendStateshuttered state to Reinitialize")
+	}
+	//
+	Svc.reset()
+
+	err := gw.Initialize(gw.config)
+	if err != nil {
+		fmt.Println("reinitialize to ", gw.config, err)
+		return fmt.Errorf("error while initializing the backend: %s", err)
+	}
+
+	return err
+}
+
+// newStack creates a new Processor by chaining multiple Processors in a call stack
+// Decorators are functions of Decorator type, source files prefixed with p_*
+// Each decorator does a specific task during the processing stage.
+// This function uses the config value save_process or validate_process to figure out which Decorators to use
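+// e.g. save_process: "HeadersParser|Header|Hasher|Debugger" (the processor stack used in the tests)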
+func (gw *BackendGateway) newStack(stackConfig string) (Processor, error) {
+	var decorators []Decorator
+	cfg := strings.ToLower(strings.TrimSpace(stackConfig))
+	if len(cfg) == 0 {
+		cfg = strings.ToLower(defaultProcessor)
+	}
+	items := strings.Split(cfg, "|")
+	for i := range items {
+		name := items[len(items)-1-i] // reverse order, since decorators are stacked
+		if makeFunc, ok := processors[name]; ok {
+			decorators = append(decorators, makeFunc())
+		} else {
+			ErrProcessorNotFound = fmt.Errorf("processor [%s] not found", name)
+			return nil, ErrProcessorNotFound
+		}
+	}
+	// build the call-stack of decorators
+	p := Decorate(DefaultProcessor{}, decorators...)
+	return p, nil
+}
+
+// loadConfig loads the config for the GatewayConfig
+func (gw *BackendGateway) loadConfig(cfg BackendConfig) error {
+	configType := BaseConfig(&GatewayConfig{})
+	// Note: treat config values as immutable
+	// if you need to change a config value, change in the file then
+	// send a SIGHUP
+	bcfg, err := Svc.ExtractConfig(cfg, configType)
+	if err != nil {
+		return err
+	}
+	gw.gwConfig = bcfg.(*GatewayConfig)
+	return nil
+}
+
+// Initialize builds the workers and initializes each one
+func (gw *BackendGateway) Initialize(cfg BackendConfig) error {
+	gw.Lock()
+	defer gw.Unlock()
+	if gw.State != BackendStateNew && gw.State != BackendStateShuttered {
+		return errors.New("Can only Initialize in BackendStateNew or BackendStateShuttered state")
+	}
+	err := gw.loadConfig(cfg)
+	if err == nil {
+		workersSize := gw.workersSize()
+		if workersSize < 1 {
+			gw.State = BackendStateError
+			return errors.New("Must have at least 1 worker")
+		}
+		gw.processors = make([]Processor, 0)
+		gw.validators = make([]Processor, 0)
+		for i := 0; i < workersSize; i++ {
+			p, err := gw.newStack(gw.gwConfig.SaveProcess)
+			if err != nil {
+				gw.State = BackendStateError
+				return err
+			}
+			gw.processors = append(gw.processors, p)
+
+			v, err := gw.newStack(gw.gwConfig.ValidateProcess)
+			if err != nil {
+				gw.State = BackendStateError
+				return err
+			}
+			gw.validators = append(gw.validators, v)
+		}
+		// initialize processors
+		if err := Svc.initialize(cfg); err != nil {
+			gw.State = BackendStateError
+			return err
+		}
+		if gw.conveyor == nil {
+			gw.conveyor = make(chan *workerMsg, workersSize)
+		}
+		// ready to start
+		gw.State = BackendStateInitialized
+		return nil
+	}
+	gw.State = BackendStateError
+	return err
+}
+
+// Start starts the worker goroutines, assuming it has been initialized or shuttered before
+func (gw *BackendGateway) Start() error {
+	gw.Lock()
+	defer gw.Unlock()
+	if gw.State == BackendStateInitialized || gw.State == BackendStateShuttered {
+		// we start our workers
+		workersSize := gw.workersSize()
+		// make our slice of channels for stopping
+		gw.workStoppers = make([]chan bool, 0)
+		// set the wait group
+		gw.wg.Add(workersSize)
+
+		for i := 0; i < workersSize; i++ {
+			stop := make(chan bool)
+			go func(workerId int, stop chan bool) {
+				// blocks here until the worker exits
+				gw.workDispatcher(
+					gw.conveyor,
+					gw.processors[workerId],
+					gw.validators[workerId],
+					workerId+1,
+					stop)
+				gw.wg.Done()
+			}(i, stop)
+			gw.workStoppers = append(gw.workStoppers, stop)
+		}
+		gw.State = BackendStateRunning
+		return nil
+	} else {
+		return errors.New(fmt.Sprintf("cannot start backend because it's in %s state", gw.State))
+	}
+}
+
+// workersSize gets the number of workers to use for saving email by reading the save_workers_size config value
+// Returns 1 if no config value was set
+func (gw *BackendGateway) workersSize() int {
+	if gw.gwConfig.WorkersSize == 0 {
+		return 1
+	}
+	return gw.gwConfig.WorkersSize
+}
+
+// saveTimeout returns the maximum amount of seconds to wait before timing out a save processing task
+func (gw *BackendGateway) saveTimeout() time.Duration {
+	if gw.gwConfig.TimeoutSave == 0 {
+		return saveTimeout
+	}
+	return time.Duration(gw.gwConfig.TimeoutSave) * time.Second
+}
+
+// validateRcptTimeout returns the maximum amount of seconds to wait before timing out a recipient validation task
+func (gw *BackendGateway) validateRcptTimeout() time.Duration {
+	if gw.gwConfig.TimeoutValidateRcpt == 0 {
+		return validateRcptTimeout
+	}
+	return time.Duration(gw.gwConfig.TimeoutValidateRcpt) * time.Second
+}
+
+func (gw *BackendGateway) workDispatcher(
+	workIn chan *workerMsg,
+	save Processor,
+	validate Processor,
+	workerId int,
+	stop chan bool) {
+
+	defer func() {
+		if r := recover(); r != nil {
+			// recover from a closed channel
+			Log().Error("worker recovered from panic:", r, string(debug.Stack()))
+		}
+		// close any connections / files
+		Svc.shutdown()
+
+	}()
+	Log().Infof("processing worker started (#%d)", workerId)
+	for {
+		select {
+		case <-stop:
+			Log().Infof("stop signal for worker (#%d)", workerId)
+			return
+		case msg := <-workIn:
+			if msg == nil {
+				Log().Debugf("worker stopped (#%d)", workerId)
+				return
+			}
+			msg.e.Lock()
+			if msg.task == TaskSaveMail {
+				// process the email here
+				// TODO we should check the err
+				result, _ := save.Process(msg.e, TaskSaveMail)
+				if result.Code() < 300 {
+					// if all good, let the gateway know that it was queued
+					msg.notifyMe <- &notifyMsg{nil, msg.e.QueuedId}
+				} else {
+					// notify the gateway about the error
+					msg.notifyMe <- &notifyMsg{err: errors.New(result.String())}
+				}
+			} else if msg.task == TaskValidateRcpt {
+				_, err := validate.Process(msg.e, TaskValidateRcpt)
+				if err != nil {
+					// validation failed
+					msg.notifyMe <- &notifyMsg{err: err}
+				} else {
+					// all good.
+					msg.notifyMe <- &notifyMsg{err: nil}
+				}
+			}
+			msg.e.Unlock()
+		}
+	}
+}
+
+// stopWorkers sends a signal to all workers to stop
+func (gw *BackendGateway) stopWorkers() {
+	for i := range gw.workStoppers {
+		gw.workStoppers[i] <- true
+	}
+}

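Process and ValidateRcpt above share the same hand-off: put a message on the conveyor channel, then block on a private notify channel or a timeout. A minimal, self-contained sketch of that pattern (all names here are illustrative, not from the package):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// job stands in for workerMsg: a payload plus a private reply channel
type job struct {
	payload  string
	notifyMe chan error
}

// dispatch places a job on the conveyor and waits for a worker's reply,
// or gives up after a timeout - the same select Process uses above
func dispatch(conveyor chan *job, payload string, timeout time.Duration) error {
	notify := make(chan error)
	conveyor <- &job{payload, notify}
	select {
	case err := <-notify:
		return err
	case <-time.After(timeout):
		return errors.New("backend timed out")
	}
}

func main() {
	conveyor := make(chan *job, 1)
	go func() {
		for j := range conveyor {
			j.notifyMe <- nil // pretend the save succeeded
		}
	}()
	fmt.Println(dispatch(conveyor, "envelope", time.Second)) // <nil>
}
```
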
+ 113 - 0
backends/gateway_test.go

@@ -0,0 +1,113 @@
+package backends
+
+import (
+	"fmt"
+	"github.com/flashmob/go-guerrilla/log"
+	"github.com/flashmob/go-guerrilla/mail"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestStates(t *testing.T) {
+	gw := BackendGateway{}
+	str := fmt.Sprintf("%s", gw.State)
+	if strings.Index(str, "NewState") != 0 {
+		t.Error("Backend should begin in NewState")
+	}
+}
+
+func TestInitialize(t *testing.T) {
+	c := BackendConfig{
+		"save_process":       "HeadersParser|Debugger",
+		"log_received_mails": true,
+		"save_workers_size":  "1",
+	}
+
+	gateway := &BackendGateway{}
+	err := gateway.Initialize(c)
+	if err != nil {
+		t.Error("Gateway did not init because:", err)
+		t.Fail()
+	}
+	if gateway.processors == nil {
+		t.Error("gateway.chains should not be nil")
+	} else if len(gateway.processors) != 1 {
+		t.Error("len(gateway.chains) should be 1, but got", len(gateway.processors))
+	}
+
+	if gateway.conveyor == nil {
+		t.Error("gateway.conveyor should not be nil")
+	} else if cap(gateway.conveyor) != gateway.workersSize() {
+		t.Error("gateway.conveyor channel buffer cap does not match worker size, cap was", cap(gateway.conveyor))
+	}
+
+	if gateway.State != BackendStateInitialized {
+		t.Error("gateway.State is not in initialized state, got ", gateway.State)
+	}
+
+}
+
+func TestStartProcessStop(t *testing.T) {
+	c := BackendConfig{
+		"save_process":       "HeadersParser|Debugger",
+		"log_received_mails": true,
+		"save_workers_size":  2,
+	}
+
+	gateway := &BackendGateway{}
+	err := gateway.Initialize(c)
+
+	mainlog, _ := log.GetLogger(log.OutputOff.String())
+	Svc.SetMainlog(mainlog)
+
+	if err != nil {
+		t.Error("Gateway did not init because:", err)
+		t.Fail()
+	}
+	err = gateway.Start()
+	if err != nil {
+		t.Error("Gateway did not start because:", err)
+		t.Fail()
+	}
+	if gateway.State != BackendStateRunning {
+		t.Error("gateway.State is not in rinning state, got ", gateway.State)
+	}
+	// can we place an envelope on the conveyor channel?
+
+	e := &mail.Envelope{
+		RemoteIP: "127.0.0.1",
+		QueuedId: "abc12345",
+		Helo:     "helo.example.com",
+		MailFrom: mail.Address{User: "test", Host: "example.com"},
+		TLS:      true,
+	}
+	e.PushRcpt(mail.Address{User: "test", Host: "example.com"})
+	e.Data.WriteString("Subject:Test\n\nThis is a test.")
+	notify := make(chan *notifyMsg)
+
+	gateway.conveyor <- &workerMsg{e, notify, TaskSaveMail}
+
+	// it should not produce any errors
+	// headers (subject) should be parsed.
+
+	select {
+	case status := <-notify:
+
+		if status.err != nil {
+			t.Error("envelope processing failed with:", status.err)
+		}
+		if e.Header["Subject"][0] != "Test" {
+			t.Error("envelope processing did not parse header")
+		}
+
+	case <-time.After(time.Second):
+		t.Error("gateway did not respond after 1 second")
+		t.Fail()
+	}
+
+	err = gateway.Shutdown()
+	if err != nil {
+		t.Error("Gateway did not shutdown")
+	}
+}

+ 0 - 448
backends/guerrilla_db_redis.go

@@ -1,448 +0,0 @@
-package backends
-
-// This backend is presented here as an example only, please modify it to your needs.
-// The backend stores the email data in Redis.
-// Other meta-information is stored in MySQL to be joined later.
-// A lot of email gets discarded without viewing on Guerrilla Mail,
-// so it's much faster to put in Redis, where other programs can
-// process it later, without touching the disk.
-//
-// Some features:
-// - It batches the SQL inserts into a single query and inserts either after a time threshold or if the batch is full
-// - If the mysql driver crashes, it's able to recover, log the incident and resume again.
-// - It also does a clean shutdown - it tries to save everything before returning
-//
-// Short history:
-// Started with issuing an insert query for each single email and another query to update the tally
-// Then applied the following optimizations:
-// - Moved tally updates to another background process which does the tallying in a single query
-// - Changed the MySQL queries to insert in batch
-// - Made a Compressor that recycles buffers using sync.Pool
-// The result was around 400% speed improvement. If you know of any more improvements, please share!
-// - Added the recovery mechanism,
-
-import (
-	"fmt"
-
-	"time"
-
-	"github.com/garyburd/redigo/redis"
-
-	"bytes"
-	"compress/zlib"
-	"database/sql"
-	_ "github.com/go-sql-driver/mysql"
-
-	"github.com/go-sql-driver/mysql"
-	"io"
-	"runtime/debug"
-	"strings"
-	"sync"
-)
-
-// how many rows to batch at a time
-const GuerrillaDBAndRedisBatchMax = 2
-
-// tick on every...
-const GuerrillaDBAndRedisBatchTimeout = time.Second * 3
-
-func init() {
-	backends["guerrilla-db-redis"] = &AbstractBackend{
-		extend: &GuerrillaDBAndRedisBackend{}}
-}
-
-type GuerrillaDBAndRedisBackend struct {
-	AbstractBackend
-	config    guerrillaDBAndRedisConfig
-	batcherWg sync.WaitGroup
-	// cache prepared queries
-	cache stmtCache
-}
-
-// statement cache. It's an array, not slice
-type stmtCache [GuerrillaDBAndRedisBatchMax]*sql.Stmt
-
-type guerrillaDBAndRedisConfig struct {
-	NumberOfWorkers    int    `json:"save_workers_size"`
-	MysqlTable         string `json:"mail_table"`
-	MysqlDB            string `json:"mysql_db"`
-	MysqlHost          string `json:"mysql_host"`
-	MysqlPass          string `json:"mysql_pass"`
-	MysqlUser          string `json:"mysql_user"`
-	RedisExpireSeconds int    `json:"redis_expire_seconds"`
-	RedisInterface     string `json:"redis_interface"`
-	PrimaryHost        string `json:"primary_mail_host"`
-}
-
-func convertError(name string) error {
-	return fmt.Errorf("failed to load backend config (%s)", name)
-}
-
-// Load the backend config for the backend. It has already been unmarshalled
-// from the main config file 'backend' config "backend_config"
-// Now we need to convert each type and copy into the guerrillaDBAndRedisConfig struct
-func (g *GuerrillaDBAndRedisBackend) loadConfig(backendConfig BackendConfig) (err error) {
-	configType := baseConfig(&guerrillaDBAndRedisConfig{})
-	bcfg, err := g.extractConfig(backendConfig, configType)
-	if err != nil {
-		return err
-	}
-	m := bcfg.(*guerrillaDBAndRedisConfig)
-	g.config = *m
-	return nil
-}
-
-func (g *GuerrillaDBAndRedisBackend) getNumberOfWorkers() int {
-	return g.config.NumberOfWorkers
-}
-
-type redisClient struct {
-	isConnected bool
-	conn        redis.Conn
-	time        int
-}
-
-// compressedData struct will be compressed using zlib when printed via fmt
-type compressedData struct {
-	extraHeaders []byte
-	data         *bytes.Buffer
-	pool         *sync.Pool
-}
-
-// newCompressedData returns a new CompressedData
-func newCompressedData() *compressedData {
-	var p = sync.Pool{
-		New: func() interface{} {
-			var b bytes.Buffer
-			return &b
-		},
-	}
-	return &compressedData{
-		pool: &p,
-	}
-}
-
-// Set the extraheaders and buffer of data to compress
-func (c *compressedData) set(b []byte, d *bytes.Buffer) {
-	c.extraHeaders = b
-	c.data = d
-}
-
-// implement Stringer interface
-func (c *compressedData) String() string {
-	if c.data == nil {
-		return ""
-	}
-	//borrow a buffer form the pool
-	b := c.pool.Get().(*bytes.Buffer)
-	// put back in the pool
-	defer func() {
-		b.Reset()
-		c.pool.Put(b)
-	}()
-
-	var r *bytes.Reader
-	w, _ := zlib.NewWriterLevel(b, zlib.BestSpeed)
-	r = bytes.NewReader(c.extraHeaders)
-	io.Copy(w, r)
-	io.Copy(w, c.data)
-	w.Close()
-	return b.String()
-}
-
-// clear it, without clearing the pool
-func (c *compressedData) clear() {
-	c.extraHeaders = []byte{}
-	c.data = nil
-}
-
-// prepares the sql query with the number of rows that can be batched with it
-func (g *GuerrillaDBAndRedisBackend) prepareInsertQuery(rows int, db *sql.DB) *sql.Stmt {
-	if rows == 0 {
-		panic("rows argument cannot be 0")
-	}
-	if g.cache[rows-1] != nil {
-		return g.cache[rows-1]
-	}
-	sqlstr := "INSERT INTO " + g.config.MysqlTable + " "
-	sqlstr += "(`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`, `hash`, `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)"
-	sqlstr += " values "
-	values := "(NOW(), ?, ?, ?, ? , 'UTF-8' , ?, 0, ?, '', ?, 0, ?, ?, ?)"
-	// add more rows
-	comma := ""
-	for i := 0; i < rows; i++ {
-		sqlstr += comma + values
-		if comma == "" {
-			comma = ","
-		}
-	}
-	stmt, sqlErr := db.Prepare(sqlstr)
-	if sqlErr != nil {
-		mainlog.WithError(sqlErr).Fatalf("failed while db.Prepare(INSERT...)")
-	}
-	// cache it
-	g.cache[rows-1] = stmt
-	return stmt
-}
-
-func (g *GuerrillaDBAndRedisBackend) doQuery(c int, db *sql.DB, insertStmt *sql.Stmt, vals *[]interface{}) {
-	var execErr error
-	defer func() {
-		if r := recover(); r != nil {
-			//logln(1, fmt.Sprintf("Recovered in %v", r))
-			mainlog.Error("Recovered form panic:", r, string(debug.Stack()))
-			sum := 0
-			for _, v := range *vals {
-				if str, ok := v.(string); ok {
-					sum = sum + len(str)
-				}
-			}
-			mainlog.Errorf("panic while inserting query [%s] size:%d, err %v", r, sum, execErr)
-			panic("query failed")
-		}
-	}()
-	// prepare the query used to insert when rows reaches batchMax
-	insertStmt = g.prepareInsertQuery(c, db)
-	_, execErr = insertStmt.Exec(*vals...)
-	if execErr != nil {
-		mainlog.WithError(execErr).Error("There was a problem the insert")
-	}
-}
-
-// Batches the rows from the feeder chan in to a single INSERT statement.
-// Execute the batches query when:
-// - number of batched rows reaches a threshold, i.e. count n = threshold
-// - or, no new rows within a certain time, i.e. times out
-// The goroutine can either exit if there's a panic or feeder channel closes
-// it returns feederOk which signals if the feeder chanel was ok (still open) while returning
-// if it feederOk is false, then it means the feeder chanel is closed
-func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(feeder chan []interface{}, db *sql.DB) (feederOk bool) {
-	// controls shutdown
-	defer g.batcherWg.Done()
-	g.batcherWg.Add(1)
-	// vals is where values are batched to
-	var vals []interface{}
-	// how many rows were batched
-	count := 0
-	// The timer will tick every second.
-	// Interrupting the select clause when there's no data on the feeder channel
-	t := time.NewTimer(GuerrillaDBAndRedisBatchTimeout)
-	// prepare the query used to insert when rows reaches batchMax
-	insertStmt := g.prepareInsertQuery(GuerrillaDBAndRedisBatchMax, db)
-	// inserts executes a batched insert query, clears the vals and resets the count
-	insert := func(c int) {
-		if c > 0 {
-			g.doQuery(c, db, insertStmt, &vals)
-		}
-		vals = nil
-		count = 0
-	}
-	defer func() {
-		if r := recover(); r != nil {
-			mainlog.Error("insertQueryBatcher caught a panic", r)
-		}
-	}()
-	// Keep getting values from feeder and add to batch.
-	// if feeder times out, execute the batched query
-	// otherwise, execute the batched query once it reaches the GuerrillaDBAndRedisBatchMax threshold
-	feederOk = true
-	for {
-		select {
-		// it may panic when reading on a closed feeder channel. feederOK detects if it was closed
-		case row, feederOk := <-feeder:
-			if row == nil {
-				mainlog.Info("Query batchaer exiting")
-				// Insert any remaining rows
-				insert(count)
-				return feederOk
-			}
-			vals = append(vals, row...)
-			count++
-			mainlog.Debug("new feeder row:", row, " cols:", len(row), " count:", count, " worker", workerId)
-			if count >= GuerrillaDBAndRedisBatchMax {
-				insert(GuerrillaDBAndRedisBatchMax)
-			}
-			// stop timer from firing (reset the interrupt)
-			if !t.Stop() {
-				<-t.C
-			}
-			t.Reset(GuerrillaDBAndRedisBatchTimeout)
-		case <-t.C:
-			// anything to insert?
-			if n := len(vals); n > 0 {
-				insert(count)
-			}
-			t.Reset(GuerrillaDBAndRedisBatchTimeout)
-		}
-	}
-}
-
-func trimToLimit(str string, limit int) string {
-	ret := strings.TrimSpace(str)
-	if len(str) > limit {
-		ret = str[:limit]
-	}
-	return ret
-}
-
-var workerId = 0
-
-func (g *GuerrillaDBAndRedisBackend) mysqlConnect() (*sql.DB, error) {
-	conf := mysql.Config{
-		User:         g.config.MysqlUser,
-		Passwd:       g.config.MysqlPass,
-		DBName:       g.config.MysqlDB,
-		Net:          "tcp",
-		Addr:         g.config.MysqlHost,
-		ReadTimeout:  GuerrillaDBAndRedisBatchTimeout + (time.Second * 10),
-		WriteTimeout: GuerrillaDBAndRedisBatchTimeout + (time.Second * 10),
-		Params:       map[string]string{"collation": "utf8_general_ci"},
-	}
-	if db, err := sql.Open("mysql", conf.FormatDSN()); err != nil {
-		mainlog.Error("cannot open mysql", err)
-		return nil, err
-	} else {
-		return db, nil
-	}
-
-}
-
-func (g *GuerrillaDBAndRedisBackend) saveMailWorker(saveMailChan chan *savePayload) {
-	var to, body string
-
-	var redisErr error
-
-	workerId++
-
-	redisClient := &redisClient{}
-	var db *sql.DB
-	var err error
-	db, err = g.mysqlConnect()
-	if err != nil {
-		mainlog.Fatalf("cannot open mysql: %s", err)
-	}
-
-	// start the query SQL batching where we will send data via the feeder channel
-	feeder := make(chan []interface{}, 1)
-	go func() {
-		for {
-			if feederOK := g.insertQueryBatcher(feeder, db); !feederOK {
-				mainlog.Debug("insertQueryBatcher exited")
-				return
-			}
-			// if insertQueryBatcher panics, it can recover and go in again
-			mainlog.Debug("resuming insertQueryBatcher")
-		}
-
-	}()
-
-	defer func() {
-		if r := recover(); r != nil {
-			//recover form closed channel
-			mainlog.Error("panic recovered in saveMailWorker", r)
-		}
-		db.Close()
-		if redisClient.conn != nil {
-			mainlog.Infof("closed redis")
-			redisClient.conn.Close()
-		}
-		// close the feeder & wait for query batcher to exit.
-		close(feeder)
-		g.batcherWg.Wait()
-
-	}()
-	var vals []interface{}
-	data := newCompressedData()
-	//  receives values from the channel repeatedly until it is closed.
-
-	for {
-		payload := <-saveMailChan
-		if payload == nil {
-			mainlog.Debug("No more saveMailChan payload")
-			return
-		}
-		mainlog.Debug("Got mail from chan", payload.mail.RemoteAddress)
-		to = trimToLimit(strings.TrimSpace(payload.recipient.User)+"@"+g.config.PrimaryHost, 255)
-		payload.mail.Helo = trimToLimit(payload.mail.Helo, 255)
-		payload.recipient.Host = trimToLimit(payload.recipient.Host, 255)
-		ts := fmt.Sprintf("%d", time.Now().UnixNano())
-		payload.mail.ParseHeaders()
-		hash := MD5Hex(
-			to,
-			payload.mail.MailFrom.String(),
-			payload.mail.Subject,
-			ts)
-		// Add extra headers
-		var addHead string
-		addHead += "Delivered-To: " + to + "\r\n"
-		addHead += "Received: from " + payload.mail.Helo + " (" + payload.mail.Helo + "  [" + payload.mail.RemoteAddress + "])\r\n"
-		addHead += "	by " + payload.recipient.Host + " with SMTP id " + hash + "@" + payload.recipient.Host + ";\r\n"
-		addHead += "	" + time.Now().Format(time.RFC1123Z) + "\r\n"
-
-		// data will be compressed when printed, with addHead added to beginning
-
-		data.set([]byte(addHead), &payload.mail.Data)
-		body = "gzencode"
-
-		// data will be written to redis - it implements the Stringer interface, redigo uses fmt to
-		// print the data to redis.
-
-		redisErr = redisClient.redisConnection(g.config.RedisInterface)
-		if redisErr == nil {
-			_, doErr := redisClient.conn.Do("SETEX", hash, g.config.RedisExpireSeconds, data)
-			if doErr == nil {
-				body = "redis" // the backend system will know to look in redis for the message data
-				data.clear()   // blank
-			}
-		} else {
-			mainlog.WithError(redisErr).Warn("Error while connecting redis")
-		}
-
-		vals = []interface{}{} // clear the vals
-		vals = append(vals,
-			trimToLimit(to, 255),
-			trimToLimit(payload.mail.MailFrom.String(), 255),
-			trimToLimit(payload.mail.Subject, 255),
-			body,
-			data.String(),
-			hash,
-			trimToLimit(to, 255),
-			payload.mail.RemoteAddress,
-			trimToLimit(payload.mail.MailFrom.String(), 255),
-			payload.mail.TLS)
-		feeder <- vals
-		payload.savedNotify <- &saveStatus{nil, hash}
-
-	}
-}
-
-func (c *redisClient) redisConnection(redisInterface string) (err error) {
-	if c.isConnected == false {
-		c.conn, err = redis.Dial("tcp", redisInterface)
-		if err != nil {
-			// handle error
-			return err
-		}
-		c.isConnected = true
-	}
-	return nil
-}
-
-// test database connection settings
-func (g *GuerrillaDBAndRedisBackend) testSettings() (err error) {
-
-	var db *sql.DB
-
-	if db, err = g.mysqlConnect(); err != nil {
-		err = fmt.Errorf("MySql cannot connect, check your settings: %s", err)
-	} else {
-		db.Close()
-	}
-
-	redisClient := &redisClient{}
-	if redisErr := redisClient.redisConnection(g.config.RedisInterface); redisErr != nil {
-		err = fmt.Errorf("Redis cannot connect, check your settings: %s", redisErr)
-	}
-
-	return
-}

+ 107 - 0
backends/p_compressor.go

@@ -0,0 +1,107 @@
+package backends
+
+import (
+	"bytes"
+	"compress/zlib"
+	"github.com/flashmob/go-guerrilla/mail"
+	"io"
+	"sync"
+)
+
+// ----------------------------------------------------------------------------------
+// Processor Name: compressor
+// ----------------------------------------------------------------------------------
+// Description   : Compress the e.Data (email data) and e.DeliveryHeader together
+// ----------------------------------------------------------------------------------
+// Config Options: None
+// --------------:-------------------------------------------------------------------
+// Input         : e.Data, e.DeliveryHeader generated by Header() processor
+// ----------------------------------------------------------------------------------
+// Output        : sets the pointer to a compressor in e.Values["zlib-compressor"]
+//               : to write the compressed data, simply use fmt to print as a string,
+//               : eg. fmt.Printf("%s", e.Values["zlib-compressor"])
+//               : or assert the type and call String() on e.Values["zlib-compressor"]
+//               : Note that it can only be output once. It destroys the buffer
+//               : after being printed
+// ----------------------------------------------------------------------------------
+func init() {
+	processors["compressor"] = func() Decorator {
+		return Compressor()
+	}
+}
+
+// compressor holds the data that will be compressed using zlib when printed via fmt
+type compressor struct {
+	extraHeaders []byte
+	data         *bytes.Buffer
+	// the pool is used to recycle buffers to ease up on the garbage collector
+	pool *sync.Pool
+}
+
+// newCompressor returns a new compressor
+func newCompressor() *compressor {
+	// buffers are recycled through a sync.Pool
+	var p = sync.Pool{
+		// if none is available, a new one is created
+		New: func() interface{} {
+			var b bytes.Buffer
+			return &b
+		},
+	}
+	return &compressor{
+		pool: &p,
+	}
+}
+
+// set assigns the extra headers and the buffer of data to compress
+func (c *compressor) set(b []byte, d *bytes.Buffer) {
+	c.extraHeaders = b
+	c.data = d
+}
+
+// String implements the Stringer interface.
+// Can only be called once!
+// This is because the compression buffer will be reset and compressor will be returned to the pool
+func (c *compressor) String() string {
+	if c.data == nil {
+		return ""
+	}
+	// borrow a buffer from the pool
+	b := c.pool.Get().(*bytes.Buffer)
+	// put back in the pool
+	defer func() {
+		b.Reset()
+		c.pool.Put(b)
+	}()
+
+	var r *bytes.Reader
+	w, _ := zlib.NewWriterLevel(b, zlib.BestSpeed)
+	r = bytes.NewReader(c.extraHeaders)
+	io.Copy(w, r)
+	io.Copy(w, c.data)
+	w.Close()
+	return b.String()
+}
+
+// clear it, without clearing the pool
+func (c *compressor) clear() {
+	c.extraHeaders = []byte{}
+	c.data = nil
+}
+
+func Compressor() Decorator {
+	return func(p Processor) Processor {
+		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
+			if task == TaskSaveMail {
+				compressor := newCompressor()
+				compressor.set([]byte(e.DeliveryHeader), &e.Data)
+				// put the pointer in there for other processors to use later in the line
+				e.Values["zlib-compressor"] = compressor
+				// continue to the next Processor in the decorator stack
+				return p.Process(e, task)
+			} else {
+				return p.Process(e, task)
+			}
+		})
+	}
+}

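The compressor is handed to later processors via e.Values and compresses lazily, the first time it is printed. A standalone sketch of that Stringer trick (the type and values here are illustrative, not the package's own):

```go
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
	"strings"
)

// deflated mimics the compressor above: it deflates its contents
// the first time it is printed via fmt
type deflated struct {
	header string
	body   string
}

func (d *deflated) String() string {
	var b bytes.Buffer
	w, _ := zlib.NewWriterLevel(&b, zlib.BestSpeed)
	io.Copy(w, strings.NewReader(d.header))
	io.Copy(w, strings.NewReader(d.body))
	w.Close()
	return b.String()
}

func main() {
	d := &deflated{"Delivered-To: test@example.com\n", "Subject: hi\n\nhello"}
	blob := fmt.Sprintf("%s", d) // this is what redigo would send to redis
	fmt.Println(len(blob), "compressed bytes")
}
```
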
+ 55 - 0
backends/p_debugger.go

@@ -0,0 +1,55 @@
+package backends
+
+import (
+	"github.com/flashmob/go-guerrilla/mail"
+	"strings"
+)
+
+// ----------------------------------------------------------------------------------
+// Processor Name: debugger
+// ----------------------------------------------------------------------------------
+// Description   : Log received emails
+// ----------------------------------------------------------------------------------
+// Config Options: log_received_mails bool - log if true
+// --------------:-------------------------------------------------------------------
+// Input         : e.MailFrom, e.RcptTo, e.Header
+// ----------------------------------------------------------------------------------
+// Output        : none (only output to the log if enabled)
+// ----------------------------------------------------------------------------------
+func init() {
+	processors[strings.ToLower(defaultProcessor)] = func() Decorator {
+		return Debugger()
+	}
+}
+
+type debuggerConfig struct {
+	LogReceivedMails bool `json:"log_received_mails"`
+}
+
+func Debugger() Decorator {
+	var config *debuggerConfig
+	initFunc := InitializeWith(func(backendConfig BackendConfig) error {
+		configType := BaseConfig(&debuggerConfig{})
+		bcfg, err := Svc.ExtractConfig(backendConfig, configType)
+		if err != nil {
+			return err
+		}
+		config = bcfg.(*debuggerConfig)
+		return nil
+	})
+	Svc.AddInitializer(initFunc)
+	return func(p Processor) Processor {
+		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
+			if task == TaskSaveMail {
+				if config.LogReceivedMails {
+					Log().Infof("Mail from: %s / to: %v", e.MailFrom.String(), e.RcptTo)
+					Log().Info("Headers are:", e.Header)
+				}
+				// continue to the next Processor in the decorator stack
+				return p.Process(e, task)
+			} else {
+				return p.Process(e, task)
+			}
+		})
+	}
+}

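Wiring the two processors above into a gateway might look like this; a sketch only, using the import path from the diff's own files and the config keys defined above (the daemon normally builds this from the "backend_config" section of its config file, as gateway_test.go does inline):

```go
package main

import "github.com/flashmob/go-guerrilla/backends"

func main() {
	// processors in save_process run left to right; lookups are lower-cased
	cfg := backends.BackendConfig{
		"save_process":       "HeadersParser|Debugger",
		"log_received_mails": true,
	}
	gw := &backends.BackendGateway{}
	if err := gw.Initialize(cfg); err != nil {
		panic(err)
	}
	if err := gw.Start(); err != nil {
		panic(err)
	}
}
```
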
+ 482 - 0
backends/p_guerrilla_db_redis.go

@@ -0,0 +1,482 @@
+package backends
+
+import (
+	"bytes"
+	"compress/zlib"
+	"database/sql"
+	"fmt"
+	"github.com/flashmob/go-guerrilla/mail"
+	"github.com/garyburd/redigo/redis"
+	"github.com/go-sql-driver/mysql"
+	"io"
+	"math/rand"
+	"runtime/debug"
+	"strings"
+	"sync"
+	"time"
+)
+
+// ----------------------------------------------------------------------------------
+// Processor Name: guerrillaredisdb
+// ----------------------------------------------------------------------------------
+// Description   : Saves the body to redis, meta data to mysql. Example only.
+//               : Limitation: it doesn't save multiple recipients or validate them
+// ----------------------------------------------------------------------------------
+// Config Options: ...
+// --------------:-------------------------------------------------------------------
+// Input         : envelope
+// ----------------------------------------------------------------------------------
+// Output        :
+// ----------------------------------------------------------------------------------
+func init() {
+	processors["guerrillaredisdb"] = func() Decorator {
+		return GuerrillaDbRedis()
+	}
+}
+
+var queryBatcherId = 0
+
+// how many rows to batch at a time
+const GuerrillaDBAndRedisBatchMax = 50
+
+// tick on every...
+const GuerrillaDBAndRedisBatchTimeout = time.Second * 3
+
+type GuerrillaDBAndRedisBackend struct {
+	config    *guerrillaDBAndRedisConfig
+	batcherWg sync.WaitGroup
+	// cache prepared queries
+	cache stmtCache
+
+	batcherStoppers []chan bool
+}
+
+// statement cache. It's an array, not slice
+type stmtCache [GuerrillaDBAndRedisBatchMax]*sql.Stmt
+
+type guerrillaDBAndRedisConfig struct {
+	NumberOfWorkers    int    `json:"save_workers_size"`
+	MysqlTable         string `json:"mail_table"`
+	MysqlDB            string `json:"mysql_db"`
+	MysqlHost          string `json:"mysql_host"`
+	MysqlPass          string `json:"mysql_pass"`
+	MysqlUser          string `json:"mysql_user"`
+	RedisExpireSeconds int    `json:"redis_expire_seconds"`
+	RedisInterface     string `json:"redis_interface"`
+	PrimaryHost        string `json:"primary_mail_host"`
+	BatchTimeout       int    `json:"redis_mysql_batch_timeout,omitempty"`
+}
+
+// Load the backend config for the backend. It has already been unmarshalled
+// from the main config file 'backend' config "backend_config"
+// Now we need to convert each type and copy into the guerrillaDBAndRedisConfig struct
+func (g *GuerrillaDBAndRedisBackend) loadConfig(backendConfig BackendConfig) (err error) {
+	configType := BaseConfig(&guerrillaDBAndRedisConfig{})
+	bcfg, err := Svc.ExtractConfig(backendConfig, configType)
+	if err != nil {
+		return err
+	}
+	m := bcfg.(*guerrillaDBAndRedisConfig)
+	g.config = m
+	return nil
+}
+
+func (g *GuerrillaDBAndRedisBackend) getNumberOfWorkers() int {
+	return g.config.NumberOfWorkers
+}
+
+type redisClient struct {
+	isConnected bool
+	conn        redis.Conn
+	time        int
+}
+
+// compressedData struct will be compressed using zlib when printed via fmt
+type compressedData struct {
+	extraHeaders []byte
+	data         *bytes.Buffer
+	pool         *sync.Pool
+}
+
+// newCompressedData returns a new CompressedData
+func newCompressedData() *compressedData {
+	var p = sync.Pool{
+		New: func() interface{} {
+			var b bytes.Buffer
+			return &b
+		},
+	}
+	return &compressedData{
+		pool: &p,
+	}
+}
+
+// set assigns the extra headers and the buffer of data to compress
+func (c *compressedData) set(b []byte, d *bytes.Buffer) {
+	c.extraHeaders = b
+	c.data = d
+}
+
+// implement Stringer interface
+func (c *compressedData) String() string {
+	if c.data == nil {
+		return ""
+	}
+	// borrow a buffer from the pool
+	b := c.pool.Get().(*bytes.Buffer)
+	// put back in the pool
+	defer func() {
+		b.Reset()
+		c.pool.Put(b)
+	}()
+
+	var r *bytes.Reader
+	w, _ := zlib.NewWriterLevel(b, zlib.BestSpeed)
+	r = bytes.NewReader(c.extraHeaders)
+	io.Copy(w, r)
+	io.Copy(w, c.data)
+	w.Close()
+	return b.String()
+}
+
+// clear it, without clearing the pool
+func (c *compressedData) clear() {
+	c.extraHeaders = []byte{}
+	c.data = nil
+}
+
+// prepares the sql query with the number of rows that can be batched with it
+func (g *GuerrillaDBAndRedisBackend) prepareInsertQuery(rows int, db *sql.DB) *sql.Stmt {
+	if rows == 0 {
+		panic("rows argument cannot be 0")
+	}
+	if g.cache[rows-1] != nil {
+		return g.cache[rows-1]
+	}
+	sqlstr := "INSERT INTO " + g.config.MysqlTable + " "
+	sqlstr += "(`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`, `hash`, `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)"
+	sqlstr += " values "
+	values := "(NOW(), ?, ?, ?, ? , 'UTF-8' , ?, 0, ?, '', ?, 0, ?, ?, ?)"
+	// add more rows
+	comma := ""
+	for i := 0; i < rows; i++ {
+		sqlstr += comma + values
+		if comma == "" {
+			comma = ","
+		}
+	}
+	stmt, sqlErr := db.Prepare(sqlstr)
+	if sqlErr != nil {
+		Log().WithError(sqlErr).Fatalf("failed while db.Prepare(INSERT...)")
+	}
+	// cache it
+	g.cache[rows-1] = stmt
+	return stmt
+}
+
+func (g *GuerrillaDBAndRedisBackend) doQuery(c int, db *sql.DB, insertStmt *sql.Stmt, vals *[]interface{}) error {
+	var execErr error
+	defer func() {
+		if r := recover(); r != nil {
+			//logln(1, fmt.Sprintf("Recovered in %v", r))
+			Log().Error("Recovered form panic:", r, string(debug.Stack()))
+			sum := 0
+			for _, v := range *vals {
+				if str, ok := v.(string); ok {
+					sum = sum + len(str)
+				}
+			}
+			Log().Errorf("panic while inserting query [%s] size:%d, err %v", r, sum, execErr)
+			panic("query failed")
+		}
+	}()
+	// prepare the query used to insert when rows reaches batchMax
+	insertStmt = g.prepareInsertQuery(c, db)
+	_, execErr = insertStmt.Exec(*vals...)
+	//if rand.Intn(2) == 1 {
+	//	return errors.New("uggabooka")
+	//}
+	if execErr != nil {
+		Log().WithError(execErr).Error("There was a problem the insert")
+	}
+	return execErr
+}
+
+// Batches the rows from the feeder chan into a single INSERT statement.
+// Executes the batched query when:
+// - the number of batched rows reaches a threshold, i.e. count n = threshold
+// - or, there are no new rows within a certain time, i.e. it times out
+// The goroutine exits if there's a panic or if the stop signal is received
+// it returns feederOk which signals if the feeder channel was ok (still open) while returning
+// if feederOk is false, then it means the feeder channel is closed
+func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(
+	feeder feedChan,
+	db *sql.DB,
+	batcherId int,
+	stop chan bool) (feederOk bool) {
+
+	// controls shutdown
+	defer g.batcherWg.Done()
+	g.batcherWg.Add(1)
+	// vals is where values are batched to
+	var vals []interface{}
+	// how many rows were batched
+	count := 0
+	// The timer will tick every timeo,
+	// interrupting the select clause when there's no data on the feeder channel
+	timeo := GuerrillaDBAndRedisBatchTimeout
+	if g.config.BatchTimeout > 0 {
+		timeo = time.Duration(g.config.BatchTimeout) * time.Second
+	}
+	t := time.NewTimer(timeo)
+	// prepare the query used to insert when rows reaches batchMax
+	insertStmt := g.prepareInsertQuery(GuerrillaDBAndRedisBatchMax, db)
+	// inserts executes a batched insert query, clears the vals and resets the count
+	inserter := func(c int) {
+		if c > 0 {
+			err := g.doQuery(c, db, insertStmt, &vals)
+			if err != nil {
+				// maybe a connection problem?
+				// retry the sql query
+				attempts := 3
+				for i := 0; i < attempts; i++ {
+					Log().Infof("retrying query, rows[%d]", c)
+					time.Sleep(time.Second)
+					err = g.doQuery(c, db, insertStmt, &vals)
+					if err == nil {
+						break // success, stop retrying
+					}
+				}
+			}
+		}
+		vals = nil
+		count = 0
+	}
+	rand.Seed(time.Now().UnixNano())
+	defer func() {
+		if r := recover(); r != nil {
+			Log().Error("insertQueryBatcher caught a panic", r, string(debug.Stack()))
+		}
+	}()
+	// Keep getting values from feeder and add to batch.
+	// if feeder times out, execute the batched query
+	// otherwise, execute the batched query once it reaches the GuerrillaDBAndRedisBatchMax threshold
+	feederOk = true
+	for {
+		select {
+		// stop signal received: flush any remaining rows, close the feeder and exit
+		case <-stop:
+			Log().Infof("MySQL query batcher stopped (#%d)", batcherId)
+			// Insert any remaining rows
+			inserter(count)
+			feederOk = false
+			close(feeder)
+			return
+		case row := <-feeder:
+
+			vals = append(vals, row...)
+			count++
+			Log().Debug("new feeder row:", row, " cols:", len(row), " count:", count, " worker", batcherId)
+			if count >= GuerrillaDBAndRedisBatchMax {
+				inserter(GuerrillaDBAndRedisBatchMax)
+			}
+			// stop timer from firing (reset the interrupt)
+			if !t.Stop() {
+				// drain the timer
+				<-t.C
+			}
+			t.Reset(timeo)
+		case <-t.C:
+			// anything to insert?
+			if n := len(vals); n > 0 {
+				inserter(count)
+			}
+			t.Reset(timeo)
+		}
+	}
+}
+
+func trimToLimit(str string, limit int) string {
+	ret := strings.TrimSpace(str)
+	if len(ret) > limit {
+		ret = ret[:limit]
+	}
+	return ret
+}
+
+func (g *GuerrillaDBAndRedisBackend) mysqlConnect() (*sql.DB, error) {
+	tOut := GuerrillaDBAndRedisBatchTimeout / time.Second // tOut is a count of seconds; multiplied by time.Second below
+	if g.config.BatchTimeout > 0 {
+		tOut = time.Duration(g.config.BatchTimeout)
+	}
+	tOut += 10
+	// don't go to 30 sec or more
+	if tOut >= 30 {
+		tOut = 29
+	}
+	conf := mysql.Config{
+		User:         g.config.MysqlUser,
+		Passwd:       g.config.MysqlPass,
+		DBName:       g.config.MysqlDB,
+		Net:          "tcp",
+		Addr:         g.config.MysqlHost,
+		ReadTimeout:  tOut * time.Second,
+		WriteTimeout: tOut * time.Second,
+		Params:       map[string]string{"collation": "utf8_general_ci"},
+	}
+	if db, err := sql.Open("mysql", conf.FormatDSN()); err != nil {
+		Log().Error("cannot open mysql", err, "]")
+		return nil, err
+	} else {
+		// do we have access?
+		_, err = db.Query("SELECT mail_id FROM " + g.config.MysqlTable + " LIMIT 1")
+		if err != nil {
+			Log().Error("cannot select table", err)
+			return nil, err
+		}
+		return db, nil
+	}
+}
+
+func (c *redisClient) redisConnection(redisInterface string) (err error) {
+	if !c.isConnected {
+		c.conn, err = redis.Dial("tcp", redisInterface)
+		if err != nil {
+			// handle error
+			return err
+		}
+		c.isConnected = true
+	}
+	return nil
+}
+
+type feedChan chan []interface{}
+
+// GuerrillaDbRedis is a specialized processor for Guerrilla mail.
+// It is here as an example of a 'monolithic' processor.
+func GuerrillaDbRedis() Decorator {
+
+	g := GuerrillaDBAndRedisBackend{}
+	redisClient := &redisClient{}
+
+	var db *sql.DB
+	var to, body string
+
+	var redisErr error
+
+	var feeders []feedChan
+
+	g.batcherStoppers = make([]chan bool, 0)
+
+	Svc.AddInitializer(InitializeWith(func(backendConfig BackendConfig) error {
+
+		configType := BaseConfig(&guerrillaDBAndRedisConfig{})
+		bcfg, err := Svc.ExtractConfig(backendConfig, configType)
+		if err != nil {
+			return err
+		}
+		g.config = bcfg.(*guerrillaDBAndRedisConfig)
+		db, err = g.mysqlConnect()
+		if err != nil {
+			return err
+		}
+		queryBatcherId++
+		// start the query SQL batching where we will send data via the feeder channel
+		stop := make(chan bool)
+		feeder := make(feedChan, 1)
+		go func(qbID int, stop chan bool) {
+			// we loop so that if insertQueryBatcher panics, it can recover and go in again
+			for {
+				if feederOK := g.insertQueryBatcher(feeder, db, qbID, stop); !feederOK {
+					Log().Debugf("insertQueryBatcher exited (#%d)", qbID)
+					return
+				}
+				Log().Debug("resuming insertQueryBatcher")
+			}
+		}(queryBatcherId, stop)
+		g.batcherStoppers = append(g.batcherStoppers, stop)
+		feeders = append(feeders, feeder)
+		return nil
+	}))
+
+	Svc.AddShutdowner(ShutdownWith(func() error {
+		db.Close()
+		Log().Infof("closed mysql")
+		if redisClient.conn != nil {
+			Log().Infof("closed redis")
+			redisClient.conn.Close()
+		}
+		// send a close signal to all query batchers to exit.
+		for i := range g.batcherStoppers {
+			g.batcherStoppers[i] <- true
+		}
+		g.batcherWg.Wait()
+
+		return nil
+	}))
+
+	var vals []interface{}
+	data := newCompressedData()
+
+	return func(p Processor) Processor {
+		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
+			if task == TaskSaveMail {
+				Log().Debug("Got mail from chan,", e.RemoteIP)
+				to = trimToLimit(strings.TrimSpace(e.RcptTo[0].User)+"@"+g.config.PrimaryHost, 255)
+				e.Helo = trimToLimit(e.Helo, 255)
+				e.RcptTo[0].Host = trimToLimit(e.RcptTo[0].Host, 255)
+				ts := fmt.Sprintf("%d", time.Now().UnixNano())
+				e.ParseHeaders()
+				hash := MD5Hex(
+					to,
+					e.MailFrom.String(),
+					e.Subject,
+					ts)
+				// Add extra headers
+				var addHead string
+				addHead += "Delivered-To: " + to + "\r\n"
+				addHead += "Received: from " + e.Helo + " (" + e.Helo + "  [" + e.RemoteIP + "])\r\n"
+				addHead += "	by " + e.RcptTo[0].Host + " with SMTP id " + hash + "@" + e.RcptTo[0].Host + ";\r\n"
+				addHead += "	" + time.Now().Format(time.RFC1123Z) + "\r\n"
+
+				// data will be compressed when printed, with addHead added to beginning
+
+				data.set([]byte(addHead), &e.Data)
+				body = "gzencode"
+
+				// data will be written to redis - it implements the Stringer interface, redigo uses fmt to
+				// print the data to redis.
+
+				redisErr = redisClient.redisConnection(g.config.RedisInterface)
+				if redisErr == nil {
+					_, doErr := redisClient.conn.Do("SETEX", hash, g.config.RedisExpireSeconds, data)
+					if doErr == nil {
+						body = "redis" // the backend system will know to look in redis for the message data
+						data.clear()   // blank
+					}
+				} else {
+					Log().WithError(redisErr).Warn("Error while connecting redis")
+				}
+
+				vals = []interface{}{} // clear the vals
+				vals = append(vals,
+					trimToLimit(to, 255),
+					trimToLimit(e.MailFrom.String(), 255),
+					trimToLimit(e.Subject, 255),
+					body,
+					data.String(),
+					hash,
+					trimToLimit(to, 255),
+					e.RemoteIP,
+					trimToLimit(e.MailFrom.String(), 255),
+					e.TLS)
+				// give the values to a random query batcher
+				feeders[rand.Intn(len(feeders))] <- vals
+				return p.Process(e, task)
+
+			} else {
+				return p.Process(e, task)
+			}
+		})
+	}
+}

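The batcher's control flow, reduced to a runnable toy: flush when the batch is full, when the timer fires with rows pending, or on the stop signal (names and sizes here are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func batcher(feeder chan int, max int, timeout time.Duration, stop chan bool) {
	var batch []int
	flush := func() {
		if len(batch) > 0 {
			fmt.Println("flush", batch)
			batch = nil
		}
	}
	t := time.NewTimer(timeout)
	for {
		select {
		case <-stop:
			flush()
			return
		case row := <-feeder:
			batch = append(batch, row)
			if len(batch) >= max {
				flush()
			}
			if !t.Stop() {
				select { // drain without blocking if the timer already fired
				case <-t.C:
				default:
				}
			}
			t.Reset(timeout)
		case <-t.C:
			flush()
			t.Reset(timeout)
		}
	}
}

func main() {
	feeder, stop := make(chan int), make(chan bool)
	go batcher(feeder, 3, 500*time.Millisecond, stop)
	for i := 1; i <= 7; i++ {
		feeder <- i
	}
	time.Sleep(time.Second) // let the timeout flush the remainder
	stop <- true
}
```
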
+ 0 - 0
backends/guerrilla_db_redis_test.go → backends/p_guerrilla_db_redis_test.go


+ 58 - 0
backends/p_hasher.go

@@ -0,0 +1,58 @@
+package backends
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/flashmob/go-guerrilla/mail"
+)
+
+// ----------------------------------------------------------------------------------
+// Processor Name: hasher
+// ----------------------------------------------------------------------------------
+// Description   : Generates a unique md5 checksum id for an email
+// ----------------------------------------------------------------------------------
+// Config Options: None
+// --------------:-------------------------------------------------------------------
+// Input         : e.MailFrom, e.Subject, e.RcptTo
+//               : assuming e.Subject was generated by "headersparser" processor
+// ----------------------------------------------------------------------------------
+// Output        : Checksum stored in e.Hash
+// ----------------------------------------------------------------------------------
+func init() {
+	processors["hasher"] = func() Decorator {
+		return Hasher()
+	}
+}
+
+// The hasher decorator computes a hash of the email for each recipient
+// It appends the hashes to envelope's Hashes slice.
+func Hasher() Decorator {
+	return func(p Processor) Processor {
+		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
+
+			if task == TaskSaveMail {
+				// base hash: the from address, subject and a nanosecond timestamp
+				h := md5.New()
+				ts := fmt.Sprintf("%d", time.Now().UnixNano())
+				io.Copy(h, strings.NewReader(e.MailFrom.String()))
+				io.Copy(h, strings.NewReader(e.Subject))
+				io.Copy(h, strings.NewReader(ts))
+				// using the base hash, calculate a unique hash for each recipient
+				for i := range e.RcptTo {
+					h2 := md5.New() // a fresh hash per recipient; copying a hash.Hash would share its state
+					h2.Write(h.Sum(nil)) // seed it with the base hash
+					io.Copy(h2, strings.NewReader(e.RcptTo[i].String()))
+					e.Hashes = append(e.Hashes, fmt.Sprintf("%x", h2.Sum(nil)))
+				}
+				return p.Process(e, task)
+			} else {
+				return p.Process(e, task)
+			}
+
+		})
+	}
+}

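How the per-recipient ids come out, as a runnable toy mirroring the loop above (addresses and the timestamp are made up):

```go
package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	// one base hash over the shared fields, then a per-recipient hash seeded with it
	base := md5.Sum([]byte("from@example.com" + "a subject" + "1486000000000000000"))
	for _, rcpt := range []string{"a@example.com", "b@example.com"} {
		h := md5.New()
		h.Write(base[:])
		h.Write([]byte(rcpt))
		fmt.Printf("%s -> %x\n", rcpt, h.Sum(nil)) // two distinct 32-hex ids
	}
}
```
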
+ 74 - 0
backends/p_header.go

@@ -0,0 +1,74 @@
+package backends
+
+import (
+	"github.com/flashmob/go-guerrilla/mail"
+	"strings"
+	"time"
+)
+
+type HeaderConfig struct {
+	PrimaryHost string `json:"primary_mail_host"`
+}
+
+// ----------------------------------------------------------------------------------
+// Processor Name: header
+// ----------------------------------------------------------------------------------
+// Description   : Adds delivery information headers to e.DeliveryHeader
+// ----------------------------------------------------------------------------------
+// Config Options: primary_mail_host string - primary host name used in the generated header
+// --------------:-------------------------------------------------------------------
+// Input         : e.Helo
+//               : e.RemoteIP
+//               : e.RcptTo
+//               : e.Hashes
+// ----------------------------------------------------------------------------------
+// Output        : Sets e.DeliveryHeader with additional delivery info
+// ----------------------------------------------------------------------------------
+func init() {
+	processors["header"] = func() Decorator {
+		return Header()
+	}
+}
+
+// Generate the MTA delivery header
+// Sets e.DeliveryHeader part of the envelope with the generated header
+func Header() Decorator {
+
+	var config *HeaderConfig
+
+	Svc.AddInitializer(InitializeWith(func(backendConfig BackendConfig) error {
+		configType := BaseConfig(&HeaderConfig{})
+		bcfg, err := Svc.ExtractConfig(backendConfig, configType)
+		if err != nil {
+			return err
+		}
+		config = bcfg.(*HeaderConfig)
+		return nil
+	}))
+
+	return func(p Processor) Processor {
+		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
+			if task == TaskSaveMail {
+				to := strings.TrimSpace(e.RcptTo[0].User) + "@" + config.PrimaryHost
+				hash := "unknown"
+				if len(e.Hashes) > 0 {
+					hash = e.Hashes[0]
+				}
+				var addHead string
+				addHead += "Delivered-To: " + to + "\n"
+				addHead += "Received: from " + e.Helo + " (" + e.Helo + "  [" + e.RemoteIP + "])\n"
+				if len(e.RcptTo) > 0 {
+					addHead += "	by " + e.RcptTo[0].Host + " with SMTP id " + hash + "@" + e.RcptTo[0].Host + ";\n"
+				}
+				addHead += "	" + time.Now().Format(time.RFC1123Z) + "\n"
+				// save the result
+				e.DeliveryHeader = addHead
+				// next processor
+				return p.Process(e, task)
+
+			} else {
+				return p.Process(e, task)
+			}
+		})
+	}
+}

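For reference, the generated e.DeliveryHeader comes out roughly like this (all values illustrative):

```
Delivered-To: test@example.com
Received: from helo.example.com (helo.example.com  [127.0.0.1])
	by example.com with SMTP id d41d8cd98f00b204e9800998ecf8427e@example.com;
	Mon, 02 Jan 2017 15:04:05 +0100
```
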
+ 37 - 0
backends/p_headers_parser.go

@@ -0,0 +1,37 @@
+package backends
+
+import (
+	"github.com/flashmob/go-guerrilla/mail"
+)
+
+// ----------------------------------------------------------------------------------
+// Processor Name: headersparser
+// ----------------------------------------------------------------------------------
+// Description   : Parses the header using e.ParseHeaders()
+// ----------------------------------------------------------------------------------
+// Config Options: none
+// --------------:-------------------------------------------------------------------
+// Input         : envelope
+// ----------------------------------------------------------------------------------
+// Output        : Headers will be populated in e.Header
+// ----------------------------------------------------------------------------------
+func init() {
+	processors["headersparser"] = func() Decorator {
+		return HeadersParser()
+	}
+}
+
+func HeadersParser() Decorator {
+	return func(p Processor) Processor {
+		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
+			if task == TaskSaveMail {
+				e.ParseHeaders()
+				// next processor
+				return p.Process(e, task)
+			} else {
+				// next processor
+				return p.Process(e, task)
+			}
+		})
+	}
+}

+ 239 - 0
backends/p_mysql.go

@@ -0,0 +1,239 @@
+package backends
+
+import (
+	"database/sql"
+	"strings"
+	"time"
+
+	"github.com/flashmob/go-guerrilla/mail"
+	"github.com/go-sql-driver/mysql"
+
+	"github.com/flashmob/go-guerrilla/response"
+	"runtime/debug"
+)
+
+// ----------------------------------------------------------------------------------
+// Processor Name: mysql
+// ----------------------------------------------------------------------------------
+// Description   : Saves the e.Data (email data) and e.DeliveryHeader together in mysql
+//               : using the hash generated by the "hasher" processor and stored in
+//               : e.Hashes
+// ----------------------------------------------------------------------------------
+// Config Options: mail_table string - mysql table name
+//               : mysql_db string - mysql database name
+//               : mysql_host string - mysql host name, eg. 127.0.0.1
+//               : mysql_pass string - mysql password
+//               : mysql_user string - mysql username
+//               : primary_mail_host string - primary host name
+// --------------:-------------------------------------------------------------------
+// Input         : e.Data
+//               : e.DeliveryHeader generated by the "header" processor
+//               : e.MailFrom
+//               : e.Subject - generated by the "headersparser" processor
+// ----------------------------------------------------------------------------------
+// Output        : Sets e.QueuedId to the first item from e.Hashes
+// ----------------------------------------------------------------------------------
+func init() {
+	processors["mysql"] = func() Decorator {
+		return MySql()
+	}
+}
+
+const procMySQLReadTimeout = time.Second * 10
+const procMySQLWriteTimeout = time.Second * 10
+
+type MysqlProcessorConfig struct {
+	MysqlTable  string `json:"mail_table"`
+	MysqlDB     string `json:"mysql_db"`
+	MysqlHost   string `json:"mysql_host"`
+	MysqlPass   string `json:"mysql_pass"`
+	MysqlUser   string `json:"mysql_user"`
+	PrimaryHost string `json:"primary_mail_host"`
+}
+
+type MysqlProcessor struct {
+	cache  stmtCache
+	config *MysqlProcessorConfig
+}
+
+func (m *MysqlProcessor) connect(config *MysqlProcessorConfig) (*sql.DB, error) {
+	var db *sql.DB
+	var err error
+	conf := mysql.Config{
+		User:         config.MysqlUser,
+		Passwd:       config.MysqlPass,
+		DBName:       config.MysqlDB,
+		Net:          "tcp",
+		Addr:         config.MysqlHost,
+		ReadTimeout:  procMySQLReadTimeout,
+		WriteTimeout: procMySQLWriteTimeout,
+		Params:       map[string]string{"collation": "utf8_general_ci"},
+	}
+	if db, err = sql.Open("mysql", conf.FormatDSN()); err != nil {
+		Log().Error("cannot open mysql", err)
+		return nil, err
+	}
+	// do we have permission to access the table?
+	_, err = db.Query("SELECT mail_id FROM " + m.config.MysqlTable + "LIMIT 1")
+	if err != nil {
+		Log().Error("cannot select table", err)
+		return nil, err
+	}
+	Log().Info("connected to mysql on tcp ", config.MysqlHost)
+	return db, err
+}
+
+// prepares the sql query with the number of rows that can be batched with it
+func (g *MysqlProcessor) prepareInsertQuery(rows int, db *sql.DB) *sql.Stmt {
+	if rows == 0 {
+		panic("rows argument cannot be 0")
+	}
+	if g.cache[rows-1] != nil {
+		return g.cache[rows-1]
+	}
+	sqlstr := "INSERT INTO " + g.config.MysqlTable + " "
+	sqlstr += "(`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`, `hash`, `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)"
+	sqlstr += " values "
+	values := "(NOW(), ?, ?, ?, ? , 'UTF-8' , ?, 0, ?, '', ?, 0, ?, ?, ?)"
+	// add more rows
+	comma := ""
+	for i := 0; i < rows; i++ {
+		sqlstr += comma + values
+		if comma == "" {
+			comma = ","
+		}
+	}
+	stmt, sqlErr := db.Prepare(sqlstr)
+	if sqlErr != nil {
+		Log().WithError(sqlErr).Panic("failed while db.Prepare(INSERT...)")
+	}
+	// cache it
+	g.cache[rows-1] = stmt
+	return stmt
+}
+
+func (g *MysqlProcessor) doQuery(c int, db *sql.DB, insertStmt *sql.Stmt, vals *[]interface{}) {
+	var execErr error
+	defer func() {
+		if r := recover(); r != nil {
+			Log().Error("Recovered form panic:", r, string(debug.Stack()))
+			sum := 0
+			for _, v := range *vals {
+				if str, ok := v.(string); ok {
+					sum = sum + len(str)
+				}
+			}
+			Log().Errorf("panic while inserting query [%s] size:%d, err %v", r, sum, execErr)
+			panic("query failed")
+		}
+	}()
+	// prepare the query used to insert when rows reaches batchMax
+	insertStmt = g.prepareInsertQuery(c, db)
+	_, execErr = insertStmt.Exec(*vals...)
+	if execErr != nil {
+		Log().WithError(execErr).Error("There was a problem the insert")
+	}
+}
+
+func MySql() Decorator {
+
+	var config *MysqlProcessorConfig
+	var vals []interface{}
+	var db *sql.DB
+	mp := &MysqlProcessor{}
+
+	Svc.AddInitializer(InitializeWith(func(backendConfig BackendConfig) error {
+		configType := BaseConfig(&MysqlProcessorConfig{})
+		bcfg, err := Svc.ExtractConfig(backendConfig, configType)
+		if err != nil {
+			return err
+		}
+		config = bcfg.(*MysqlProcessorConfig)
+		mp.config = config
+		db, err = mp.connect(config)
+		if err != nil {
+			Log().Errorf("cannot open mysql: %s", err)
+			return err
+		}
+		return nil
+	}))
+
+	// shutdown
+	Svc.AddShutdowner(ShutdownWith(func() error {
+		if db != nil {
+			return db.Close()
+		}
+		return nil
+	}))
+
+	return func(p Processor) Processor {
+		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
+
+			if task == TaskSaveMail {
+				var to, body string
+				to = trimToLimit(strings.TrimSpace(e.RcptTo[0].User)+"@"+config.PrimaryHost, 255)
+				hash := ""
+				if len(e.Hashes) > 0 {
+					hash = e.Hashes[0]
+					e.QueuedId = e.Hashes[0]
+				}
+
+				var co *compressor
+				// a compressor was set by the Compress processor
+				if c, ok := e.Values["zlib-compressor"]; ok {
+					body = "gzip"
+					co = c.(*compressor)
+				}
+				// was saved in redis by the Redis processor
+				if _, ok := e.Values["redis"]; ok {
+					body = "redis"
+				}
+
+				// build the values for the query
+				vals = []interface{}{} // clear the vals
+				vals = append(vals,
+					to,
+					trimToLimit(e.MailFrom.String(), 255),
+					trimToLimit(e.Subject, 255),
+					body)
+				if body == "redis" {
+					// data already saved in redis
+					vals = append(vals, "")
+				} else if co != nil {
+					// use a compressor (automatically adds e.DeliveryHeader)
+					vals = append(vals, co.String())
+
+				} else {
+					vals = append(vals, e.String())
+				}
+
+				vals = append(vals,
+					hash,
+					to,
+					e.RemoteIP,
+					trimToLimit(e.MailFrom.String(), 255),
+					e.TLS)
+
+				stmt := mp.prepareInsertQuery(1, db)
+				mp.doQuery(1, db, stmt, &vals)
+				// continue to the next Processor in the decorator chain
+				return p.Process(e, task)
+			} else if task == TaskValidateRcpt {
+				// example validation of e.RcptTo:
+				if len(e.RcptTo) > 0 {
+					// since this is called each time a recipient is added
+					// validate only the _last_ recipient that was appended
+					last := e.RcptTo[len(e.RcptTo)-1]
+					if len(last.User) > 255 {
+						// TODO what kind of response to send?
+						return NewResult(response.Canned.FailNoSenderDataCmd), NoSuchUser
+					}
+				}
+				return p.Process(e, task)
+			} else {
+				return p.Process(e, task)
+			}
+
+		})
+	}
+}

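Chaining the processors above so that message bodies go to redis and metadata to mysql might be configured like this; a sketch with illustrative values, using only the config keys declared by the processor configs above:

```go
package main

import "github.com/flashmob/go-guerrilla/backends"

func main() {
	// order matters: headersparser fills e.Subject, hasher fills e.Hashes,
	// header fills e.DeliveryHeader, compressor feeds redis and mysql
	cfg := backends.BackendConfig{
		"save_process":         "HeadersParser|Hasher|Header|Compressor|Redis|MySql",
		"primary_mail_host":    "example.com",
		"mail_table":           "new_mail",
		"mysql_db":             "mail",
		"mysql_host":           "127.0.0.1:3306",
		"mysql_user":           "mail_user",
		"mysql_pass":           "secret",
		"redis_interface":      "127.0.0.1:6379",
		"redis_expire_seconds": 7200,
	}
	gw := &backends.BackendGateway{}
	if err := gw.Initialize(cfg); err != nil {
		panic(err)
	}
}
```
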
+ 130 - 0
backends/p_redis.go

@@ -0,0 +1,130 @@
+package backends
+
+import (
+	"fmt"
+
+	"github.com/flashmob/go-guerrilla/mail"
+	"github.com/flashmob/go-guerrilla/response"
+
+	"github.com/garyburd/redigo/redis"
+)
+
+// ----------------------------------------------------------------------------------
+// Processor Name: redis
+// ----------------------------------------------------------------------------------
+// Description   : Saves the e.Data (email data) and e.DeliveryHeader together in redis
+//               : using the hash generated by the "hasher" processor and stored in
+//               : e.Hashes
+// ----------------------------------------------------------------------------------
+// Config Options: redis_expire_seconds int - how many seconds until expiry
+//               : redis_interface string - <host>:<port> eg, 127.0.0.1:6379
+// --------------:-------------------------------------------------------------------
+// Input         : e.Data
+//               : e.DeliveryHeader generated by Header() processor
+//               :
+// ----------------------------------------------------------------------------------
+// Output        : Sets e.QueuedId to the first item from e.Hashes
+// ----------------------------------------------------------------------------------
+func init() {
+
+	processors["redis"] = func() Decorator {
+		return Redis()
+	}
+}
+
+type RedisProcessorConfig struct {
+	RedisExpireSeconds int    `json:"redis_expire_seconds"`
+	RedisInterface     string `json:"redis_interface"`
+}
+
+type RedisProcessor struct {
+	isConnected bool
+	conn        redis.Conn
+}
+
+func (r *RedisProcessor) redisConnection(redisInterface string) (err error) {
+	if !r.isConnected {
+		r.conn, err = redis.Dial("tcp", redisInterface)
+		if err != nil {
+			// handle error
+			return err
+		}
+		r.isConnected = true
+	}
+	return nil
+}
+
+// The redis decorator stores the email data in redis
+
+func Redis() Decorator {
+
+	var config *RedisProcessorConfig
+	redisClient := &RedisProcessor{}
+	// read the config into RedisProcessorConfig
+	Svc.AddInitializer(InitializeWith(func(backendConfig BackendConfig) error {
+		configType := BaseConfig(&RedisProcessorConfig{})
+		bcfg, err := Svc.ExtractConfig(backendConfig, configType)
+		if err != nil {
+			return err
+		}
+		config = bcfg.(*RedisProcessorConfig)
+		if redisErr := redisClient.redisConnection(config.RedisInterface); redisErr != nil {
+			err := fmt.Errorf("Redis cannot connect, check your settings: %s", redisErr)
+			return err
+		}
+		return nil
+	}))
+	// When shutting down
+	Svc.AddShutdowner(ShutdownWith(func() error {
+		if redisClient.isConnected {
+			return redisClient.conn.Close()
+		}
+		return nil
+	}))
+
+	var redisErr error
+
+	return func(p Processor) Processor {
+		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
+
+			if task == TaskSaveMail {
+				hash := ""
+				if len(e.Hashes) > 0 {
+					e.QueuedId = e.Hashes[0]
+					hash = e.Hashes[0]
+
+					var stringer fmt.Stringer
+					// a compressor was set
+					if c, ok := e.Values["zlib-compressor"]; ok {
+						stringer = c.(*compressor)
+					} else {
+						stringer = e
+					}
+					redisErr = redisClient.redisConnection(config.RedisInterface)
+
+					if redisErr == nil {
+						_, doErr := redisClient.conn.Do("SETEX", hash, config.RedisExpireSeconds, stringer)
+						if doErr != nil {
+							redisErr = doErr
+						}
+					}
+					if redisErr != nil {
+						Log().WithError(redisErr).Warn("Error while talking to redis")
+						result := NewResult(response.Canned.FailBackendTransaction)
+						return result, redisErr
+					} else {
+						e.Values["redis"] = "redis" // the backend system will know to look in redis for the message data
+					}
+				} else {
+					Log().Error("Redis needs a Hash() process before it")
+				}
+
+				return p.Process(e, task)
+			} else {
+				// nothing to do for this task
+				return p.Process(e, task)
+			}
+
+		})
+	}
+}
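
A minimal sketch of a config that enables this processor; the `Hasher` name is assumed from the Hash() requirement logged above, since Redis reads the hash from e.Hashes[0]:

    cfg := backends.BackendConfig{
        // Hasher must run before Redis so that e.Hashes[0] is populated
        "save_process":         "HeadersParser|Hasher|Redis",
        "redis_interface":      "127.0.0.1:6379",
        "redis_expire_seconds": 7200,
    }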

+ 48 - 0
backends/processor.go

@@ -0,0 +1,48 @@
+package backends
+
+import (
+	"github.com/flashmob/go-guerrilla/mail"
+)
+
+type SelectTask int
+
+const (
+	TaskSaveMail SelectTask = iota
+	TaskValidateRcpt
+)
+
+func (o SelectTask) String() string {
+	switch o {
+	case TaskSaveMail:
+		return "save mail"
+	case TaskValidateRcpt:
+		return "validate recipient"
+	}
+	return "[unnamed task]"
+}
+
+var BackendResultOK = NewResult("200 OK")
+
+// Our processor is defined as something that processes the envelope and returns a result and error
+type Processor interface {
+	Process(*mail.Envelope, SelectTask) (Result, error)
+}
+
+// Signature of Processor
+type ProcessWith func(*mail.Envelope, SelectTask) (Result, error)
+
+// Make ProcessWith will satisfy the Processor interface
+func (f ProcessWith) Process(e *mail.Envelope, task SelectTask) (Result, error) {
+	// delegate to the anonymous function
+	return f(e, task)
+}
+
+// DefaultProcessor is a undecorated worker that does nothing
+// Notice DefaultProcessor has no knowledge of the other decorators that have orthogonal concerns.
+type DefaultProcessor struct{}
+
+// do nothing except return the result
+// (this is the last call in the decorator stack, if it got here, then all is good)
+func (w DefaultProcessor) Process(e *mail.Envelope, task SelectTask) (Result, error) {
+	return BackendResultOK, nil
+}
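
To make the decorator pattern concrete, here is a sketch of a no-op middleware built from the pieces above (it assumes the `Decorator` type implied by the processors registry and the package's `Log()` helper):

    // ExampleLogger logs on save and passes the envelope down the chain unchanged.
    func ExampleLogger() Decorator {
        return func(p Processor) Processor {
            return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
                if task == TaskSaveMail {
                    Log().Info("about to save mail from ", e.MailFrom.String())
                }
                // delegate to the next Processor; DefaultProcessor
                // terminates the chain with BackendResultOK
                return p.Process(e, task)
            })
        }
    }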

+ 16 - 0
backends/validate.go

@@ -0,0 +1,16 @@
+package backends
+
+import (
+	"errors"
+)
+
+type RcptError error
+
+var (
+	NoSuchUser          = RcptError(errors.New("no such user"))
+	StorageNotAvailable = RcptError(errors.New("storage not available"))
+	StorageTooBusy      = RcptError(errors.New("storage too busy"))
+	StorageTimeout      = RcptError(errors.New("storage timeout"))
+	QuotaExceeded       = RcptError(errors.New("quota exceeded"))
+	UserSuspended       = RcptError(errors.New("user suspended"))
+)
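
These sentinel errors are intended to be returned from a TaskValidateRcpt processor together with a canned SMTP response. A hedged sketch, reusing the response seen in the sql processor earlier in this diff (`userExists` is a hypothetical lookup):

    func ValidateUser() Decorator {
        return func(p Processor) Processor {
            return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
                if task == TaskValidateRcpt && len(e.RcptTo) > 0 {
                    // only the most recently appended recipient needs checking
                    last := e.RcptTo[len(e.RcptTo)-1]
                    if !userExists(last.User) { // userExists: assumed helper
                        return NewResult(response.Canned.FailNoSenderDataCmd), NoSuchUser
                    }
                }
                return p.Process(e, task)
            })
        }
    }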

+ 13 - 21
client.go

@@ -10,8 +10,8 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
-	"github.com/flashmob/go-guerrilla/envelope"
 	"github.com/flashmob/go-guerrilla/log"
 	"github.com/flashmob/go-guerrilla/log"
+	"github.com/flashmob/go-guerrilla/mail"
 )
 )
 
 
 // ClientState indicates which part of the SMTP transaction a given client is in.
 // ClientState indicates which part of the SMTP transaction a given client is in.
@@ -31,7 +31,7 @@ const (
 )

 type client struct {
-	*envelope.Envelope
+	*mail.Envelope
 	ID          uint64
 	ConnectedAt time.Time
 	KilledAt    time.Time
@@ -51,19 +51,20 @@ type client struct {
 	log       log.Logger
 }

-// Allocate a new client
-func NewClient(conn net.Conn, clientID uint64, logger log.Logger) *client {
+// NewClient allocates a new client.
+func NewClient(conn net.Conn, clientID uint64, logger log.Logger, envelope *mail.Pool) *client {
 	c := &client{
 		conn: conn,
-		Envelope: &envelope.Envelope{
-			RemoteAddress: getRemoteAddr(conn),
-		},
+		// Envelope will be borrowed from the envelope pool
+		// the envelope could be 'detached' from the client later when processing
+		Envelope:    envelope.Borrow(getRemoteAddr(conn), clientID),
 		ConnectedAt: time.Now(),
 		bufin:       newSMTPBufferedReader(conn),
 		bufout:      bufio.NewWriter(conn),
 		ID:          clientID,
 		log:         logger,
 	}
+
 	// used for reading the DATA state
 	c.smtpReader = textproto.NewReader(c.bufin.Reader)
 	return c
@@ -113,18 +114,14 @@ func (c *client) sendResponse(r ...interface{}) {
 // -End of DATA command
 // TLS handshake
 func (c *client) resetTransaction() {
-	c.MailFrom = envelope.EmailAddress{}
-	c.RcptTo = []envelope.EmailAddress{}
-	c.Data.Reset()
-	c.Subject = ""
-	c.Header = nil
+	c.Envelope.ResetTransaction()
 }

 // isInTransaction returns true if the connection is inside a transaction.
 // A transaction starts after a MAIL command gets issued by the client.
 // Call resetTransaction to end the transaction
 func (c *client) isInTransaction() bool {
-	isMailFromEmpty := c.MailFrom == (envelope.EmailAddress{})
+	isMailFromEmpty := c.MailFrom == (mail.Address{})
 	if isMailFromEmpty {
 		return false
 	}
@@ -159,24 +156,19 @@ func (c *client) closeConn() {
 }

 // init is called after the client is borrowed from the pool, to get it ready for the connection
-func (c *client) init(conn net.Conn, clientID uint64) {
+func (c *client) init(conn net.Conn, clientID uint64, ep *mail.Pool) {
 	c.conn = conn
 	// reset our reader & writer
 	c.bufout.Reset(conn)
 	c.bufin.Reset(conn)
-	// reset the data buffer, keep it allocated
-	c.Data.Reset()
 	// reset session data
 	c.state = 0
 	c.KilledAt = time.Time{}
 	c.ConnectedAt = time.Now()
 	c.ID = clientID
-	c.TLS = false
 	c.errors = 0
-	c.Helo = ""
-	c.Header = nil
-	c.RemoteAddress = getRemoteAddr(conn)
-
+	// borrow an envelope from the envelope pool
+	c.Envelope = ep.Borrow(getRemoteAddr(conn), clientID)
 }

 // getID returns the client's unique ID
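
The pooled envelope changes the client's lifecycle: instead of allocating a fresh envelope per connection, one is borrowed on creation and again on reuse. A sketch of the new call sites (`mail.NewPool` and its size argument are assumptions; the pool constructor itself does not appear in this diff):

    pool := mail.NewPool(100) // capacity would typically match max_clients
    c := NewClient(conn, clientID, logger, pool)
    // when the client struct is recycled for a new connection:
    c.init(conn, nextID, pool)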

+ 91 - 0
cmd/guerrillad/backend_test.go.no

@@ -0,0 +1,91 @@
+package main
+
+import (
+	"testing"
+	"os"
+	"time"
+	"io/ioutil"
+	"github.com/flashmob/go-guerrilla/tests/testcert"
+	"github.com/flashmob/go-guerrilla/log"
+	"runtime"
+	"github.com/spf13/cobra"
+	"sync"
+	"strings"
+	"fmt"
+)
+
+func TestBadBackendReload2(t *testing.T) {
+
+	testcert.GenerateCert("mail2.guerrillamail.com", "", 365*24*time.Hour, false, 2048, "P256", "../../tests/")
+	os.Truncate("../../tests/testlog", 0)
+	//mainlog, _ = log.GetLogger("../../tests/testlog")
+	mainlog, _ = log.GetLogger("stdout")
+	mainlog.SetLevel("debug")
+	mainlog.Info("are u sure")
+	mainlog.Info("not another word")
+
+	select {
+
+	case <-time.After(10 * time.Second):
+		mainlog.Info("paabix")
+		stacktrace := make([]byte, 8192)
+		length := runtime.Stack(stacktrace, true)
+		_ = length
+		fmt.Fprint(ioutil.Discard, string(stacktrace[:length]))
+
+		panic("timed out")
+	}
+
+	mainlog.Info("not another word")
+	sigKill()
+	ioutil.WriteFile("configJsonA.json", []byte(configJsonA), 0644)
+	cmd := &cobra.Command{}
+	configPath = "configJsonA.json"
+	var serveWG sync.WaitGroup
+	serveWG.Add(1)
+	go func() {
+		mainlog.Info("start serve")
+		serve(cmd, []string{})
+		serveWG.Done()
+	}()
+	mainlog.Info("after start")
+	time.Sleep(testPauseDuration)
+
+	// change the config file to the one with a broken backend
+	ioutil.WriteFile("configJsonA.json", []byte(configJsonE), 0644)
+
+	// test SIGHUP via the kill command
+	// Would not work on windows as kill is not available.
+	// TODO: Implement an alternative test for windows.
+	if runtime.GOOS != "windows" {
+		sigHup()
+		time.Sleep(testPauseDuration) // allow sighup to do its job
+		// did the pidfile change as expected?
+		if _, err := os.Stat("./pidfile2.pid"); os.IsNotExist(err) {
+			t.Error("pidfile not changed after SIGHUP", err)
+		}
+	}
+
+	// send kill signal and wait for exit
+	sigKill()
+	serveWG.Wait()
+	//time.Sleep(time.Second * 3)
+	// did the backend start as expected?
+	fd, err := os.Open("../../tests/testlog")
+	if err != nil {
+		t.Error(err)
+	}
+	if read, err := ioutil.ReadAll(fd); err == nil {
+		logOutput := string(read)
+		if i := strings.Index(logOutput, "reverted to old backend config"); i < 0 {
+			t.Error("did not revert to old backend config")
+		}
+	}
+
+	// cleanup
+	//os.Truncate("../../tests/testlog", 0)
+	os.Remove("configJsonA.json")
+	os.Remove("./pidfile.pid")
+	os.Remove("./pidfile2.pid")
+
+}

+ 49 - 119
cmd/guerrillad/serve.go

@@ -2,20 +2,16 @@ package main

 import (
 	"encoding/json"
-	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"os/signal"
-	"reflect"
 	"strconv"
 	"strings"
 	"syscall"
 	"time"

 	"github.com/flashmob/go-guerrilla"
-	"github.com/flashmob/go-guerrilla/backends"
 	"github.com/flashmob/go-guerrilla/log"
 	"github.com/spf13/cobra"
 )
@@ -35,45 +31,52 @@ var (
 	}

 	cmdConfig     = CmdConfig{}
-	signalChannel = make(chan os.Signal, 1) // for trapping SIG_HUP
+	signalChannel = make(chan os.Signal, 1) // for trapping SIGHUP and friends
 	mainlog       log.Logger
+
+	d guerrilla.Daemon
 )

 func init() {
 	// log to stderr on startup
-	var logOpenError error
-	if mainlog, logOpenError = log.GetLogger(log.OutputStderr.String()); logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed creating a logger to %s", log.OutputStderr)
+	var err error
+	mainlog, err = log.GetLogger(log.OutputStderr.String())
+	if err != nil {
+		mainlog.WithError(err).Errorf("Failed creating a logger to %s", log.OutputStderr)
 	}
 	serveCmd.PersistentFlags().StringVarP(&configPath, "config", "c",
 		"goguerrilla.conf", "Path to the configuration file")
 	// intentionally didn't specify default pidFile; value from config is used if flag is empty
 	serveCmd.PersistentFlags().StringVarP(&pidFile, "pidFile", "p",
 		"", "Path to the pid file")
-
 	rootCmd.AddCommand(serveCmd)
 }

-func sigHandler(app guerrilla.Guerrilla) {
-	// handle SIGHUP for reloading the configuration while running
-	signal.Notify(signalChannel, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGINT, syscall.SIGKILL)
-
+func sigHandler() {
+	signal.Notify(signalChannel,
+		syscall.SIGHUP,
+		syscall.SIGTERM,
+		syscall.SIGQUIT,
+		syscall.SIGINT,
+		syscall.SIGKILL,
+		syscall.SIGUSR1,
+	)
 	for sig := range signalChannel {
 		if sig == syscall.SIGHUP {
-			// save old config & load in new one
-			oldConfig := cmdConfig
-			newConfig := CmdConfig{}
-			err := readConfig(configPath, pidFile, &newConfig)
-			if err != nil {
-				mainlog.WithError(err).Error("Error while ReadConfig (reload)")
-			} else {
-				cmdConfig = newConfig
-				mainlog.Infof("Configuration was reloaded at %s", guerrilla.ConfigLoadTime)
-				cmdConfig.emitChangeEvents(&oldConfig, app)
-			}
+			d.ReloadConfigFile(configPath)
+		} else if sig == syscall.SIGUSR1 {
+			d.ReopenLogs()
 		} else if sig == syscall.SIGTERM || sig == syscall.SIGQUIT || sig == syscall.SIGINT {
 			mainlog.Infof("Shutdown signal caught")
-			app.Shutdown()
+			go func() {
+				select {
+				// exit if graceful shutdown not finished in 60 sec.
+				case <-time.After(time.Second * 60):
+					mainlog.Error("graceful shutdown timed out")
+					os.Exit(1)
+				}
+			}()
+			d.Shutdown()
 			mainlog.Infof("Shutdown completed, exiting.")
 			return
 		} else {
@@ -83,32 +86,14 @@ func sigHandler(app guerrilla.Guerrilla) {
 	}
 }

-func subscribeBackendEvent(event string, backend backends.Backend, app guerrilla.Guerrilla) {
-
-	app.Subscribe(event, func(cmdConfig *CmdConfig) {
-		logger, _ := log.GetLogger(cmdConfig.LogFile)
-		var err error
-		if err = backend.Shutdown(); err != nil {
-			logger.WithError(err).Warn("Backend failed to shutdown")
-			return
-		}
-		backend, err = backends.New(cmdConfig.BackendName, cmdConfig.BackendConfig, logger)
-		if err != nil {
-			logger.WithError(err).Fatalf("Error while loading the backend %q",
-				cmdConfig.BackendName)
-		} else {
-			logger.Info("Backend started:", cmdConfig.BackendName)
-		}
-	})
-}
-
 func serve(cmd *cobra.Command, args []string) {
 	logVersion()
-
-	err := readConfig(configPath, pidFile, &cmdConfig)
+	d = guerrilla.Daemon{Logger: mainlog}
+	err := readConfig(configPath, pidFile)
 	if err != nil {
 		mainlog.WithError(err).Fatal("Error while reading config")
 	}
+	mainlog.SetLevel(cmdConfig.LogLevel)

 	// Check that max clients is not greater than system open file limit.
 	fileLimit := getFileLimit()
@@ -124,40 +109,12 @@ func serve(cmd *cobra.Command, args []string) {
 		}
 	}

-	// Backend setup
-	var backend backends.Backend
-	backend, err = backends.New(cmdConfig.BackendName, cmdConfig.BackendConfig, mainlog)
-	if err != nil {
-		mainlog.WithError(err).Fatalf("Error while loading the backend %q",
-			cmdConfig.BackendName)
-	}
-
-	app, err := guerrilla.New(&cmdConfig.AppConfig, backend, mainlog)
+	err = d.Start()
 	if err != nil {
 		mainlog.WithError(err).Error("Error(s) when creating new server(s)")
+		os.Exit(1)
 	}
-
-	// start the app
-	err = app.Start()
-	if err != nil {
-		mainlog.WithError(err).Error("Error(s) when starting server(s)")
-	}
-	subscribeBackendEvent("config_change:backend_config", backend, app)
-	subscribeBackendEvent("config_change:backend_name", backend, app)
-	// Write out our PID
-	writePid(cmdConfig.PidFile)
-	// ...and write out our pid whenever the file name changes in the config
-	app.Subscribe("config_change:pid_file", func(ac *guerrilla.AppConfig) {
-		writePid(ac.PidFile)
-	})
-	// change the logger from stdrerr to one from config
-	mainlog.Infof("main log configured to %s", cmdConfig.LogFile)
-	var logOpenError error
-	if mainlog, logOpenError = log.GetLogger(cmdConfig.LogFile); logOpenError != nil {
-		mainlog.WithError(logOpenError).Errorf("Failed changing to a custom logger [%s]", cmdConfig.LogFile)
-	}
-	app.SetLogger(mainlog)
-	sigHandler(app)
+	sigHandler()

 }

@@ -165,52 +122,42 @@ func serve(cmd *cobra.Command, args []string) {
 // the command line interface.
 type CmdConfig struct {
 	guerrilla.AppConfig
-	BackendName   string                 `json:"backend_name"`
-	BackendConfig backends.BackendConfig `json:"backend_config"`
 }

 func (c *CmdConfig) load(jsonBytes []byte) error {
-	c.AppConfig.Load(jsonBytes)
 	err := json.Unmarshal(jsonBytes, &c)
 	if err != nil {
 		return fmt.Errorf("Could not parse config file: %s", err.Error())
+	} else {
+		// load in guerrilla.AppConfig
+		return c.AppConfig.Load(jsonBytes)
 	}
-	return nil
 }

 func (c *CmdConfig) emitChangeEvents(oldConfig *CmdConfig, app guerrilla.Guerrilla) {
-	// has backend changed?
-	if !reflect.DeepEqual((*c).BackendConfig, (*oldConfig).BackendConfig) {
-		app.Publish("config_change:backend_config", c)
-	}
-	if c.BackendName != oldConfig.BackendName {
-		app.Publish("config_change:backend_name", c)
-	}
+	// if your CmdConfig has any extra fields, you can emit events here
+	// ...
 	// call other emitChangeEvents
 	c.AppConfig.EmitChangeEvents(&oldConfig.AppConfig, app)
 }

 // readConfig should be called at startup, or when a SIGHUP is caught
-func readConfig(path string, pidFile string, config *CmdConfig) error {
-	// load in the config.
-	data, err := ioutil.ReadFile(path)
+func readConfig(path string, pidFile string) error {
+	// Load in the config.
+	// Note here is the only place we can make an exception to the
+	// "treat config values as immutable". For example, here the
+	// command line flags can override config values
+	appConfig, err := d.LoadConfig(path)
 	if err != nil {
 		return fmt.Errorf("Could not read config file: %s", err.Error())
 	}
-	if err := config.load(data); err != nil {
-		return err
-	}
 	// override config pidFile with flag from the command line
 	if len(pidFile) > 0 {
-		config.AppConfig.PidFile = pidFile
-	} else if len(config.AppConfig.PidFile) == 0 {
-		config.AppConfig.PidFile = defaultPidFile
+		appConfig.PidFile = pidFile
+	} else if len(appConfig.PidFile) == 0 {
+		appConfig.PidFile = defaultPidFile
 	}
-
-	if len(config.AllowedHosts) == 0 {
-		return errors.New("Empty `allowed_hosts` is not allowed")
-	}
-	guerrilla.ConfigLoadTime = time.Now()
+	d.SetConfig(&appConfig)
 	return nil
 }

@@ -226,20 +173,3 @@ func getFileLimit() int {
 	}
 	return limit
 }
-
-func writePid(pidFile string) {
-	if len(pidFile) > 0 {
-		if f, err := os.Create(pidFile); err == nil {
-			defer f.Close()
-			pid := os.Getpid()
-			if _, err := f.WriteString(fmt.Sprintf("%d", pid)); err == nil {
-				f.Sync()
-				mainlog.Infof("pid_file (%s) written with pid:%v", pidFile, pid)
-			} else {
-				mainlog.WithError(err).Fatalf("Error while writing pidFile (%s)", pidFile)
-			}
-		} else {
-			mainlog.WithError(err).Fatalf("Error while creating pidFile (%s)", pidFile)
-		}
-	}
-}
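
With the plumbing removed, serve() is now a thin wrapper over the embeddable Daemon API. A sketch of the same lifecycle when using the package directly (method names as introduced in this commit):

    d := guerrilla.Daemon{}
    cfg, err := d.LoadConfig("goguerrilla.conf")
    if err != nil {
        // could not read or parse the config file
    }
    cfg.PidFile = "guerrillad.pid" // command-line overrides would go here
    d.SetConfig(&cfg)
    if err := d.Start(); err != nil {
        // one or more servers failed to start
    }
    // the equivalents of the signal handlers above:
    d.ReloadConfigFile("goguerrilla.conf") // SIGHUP
    d.ReopenLogs()                         // SIGUSR1
    d.Shutdown()                           // SIGTERM / SIGQUIT / SIGINT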

+ 282 - 113
cmd/guerrillad/serve_test.go

@@ -3,6 +3,12 @@ package main
 import (
 	"crypto/tls"
 	"encoding/json"
+	"github.com/flashmob/go-guerrilla"
+	"github.com/flashmob/go-guerrilla/backends"
+	"github.com/flashmob/go-guerrilla/log"
+	test "github.com/flashmob/go-guerrilla/tests"
+	"github.com/flashmob/go-guerrilla/tests/testcert"
+	"github.com/spf13/cobra"
 	"io/ioutil"
 	"os"
 	"os/exec"
 	"sync"
 	"sync"
 	"testing"
 	"testing"
 	"time"
 	"time"
-
-	"github.com/flashmob/go-guerrilla"
-	"github.com/flashmob/go-guerrilla/backends"
-	"github.com/flashmob/go-guerrilla/log"
-	test "github.com/flashmob/go-guerrilla/tests"
-	"github.com/flashmob/go-guerrilla/tests/testcert"
-	"github.com/spf13/cobra"
 )
 )
 
 
 var configJsonA = `
 var configJsonA = `
@@ -33,8 +32,9 @@ var configJsonA = `
       "guerrillamail.net",
       "guerrillamail.net",
       "guerrillamail.org"
       "guerrillamail.org"
     ],
     ],
-    "backend_name": "dummy",
     "backend_config": {
     "backend_config": {
+    	"save_workers_size" : 1,
+    	"save_process": "HeadersParser|Debugger",
         "log_received_mails": true
         "log_received_mails": true
     },
     },
     "servers" : [
     "servers" : [
@@ -45,7 +45,7 @@ var configJsonA = `
             "private_key_file":"../..//tests/mail2.guerrillamail.com.key.pem",
             "private_key_file":"../..//tests/mail2.guerrillamail.com.key.pem",
             "public_key_file":"../../tests/mail2.guerrillamail.com.cert.pem",
             "public_key_file":"../../tests/mail2.guerrillamail.com.cert.pem",
             "timeout":180,
             "timeout":180,
-            "listen_interface":"127.0.0.1:25",
+            "listen_interface":"127.0.0.1:3536",
             "start_tls_on":true,
             "start_tls_on":true,
             "tls_always_on":false,
             "tls_always_on":false,
             "max_clients": 1000,
             "max_clients": 1000,
@@ -81,8 +81,9 @@ var configJsonB = `
       "guerrillamail.net",
       "guerrillamail.net",
       "guerrillamail.org"
       "guerrillamail.org"
     ],
     ],
-    "backend_name": "dummy",
     "backend_config": {
     "backend_config": {
+    	"save_workers_size" : 1,
+    	"save_process": "HeadersParser|Debugger",
         "log_received_mails": false
         "log_received_mails": false
     },
     },
     "servers" : [
     "servers" : [
@@ -93,7 +94,7 @@ var configJsonB = `
             "private_key_file":"../..//tests/mail2.guerrillamail.com.key.pem",
             "private_key_file":"../..//tests/mail2.guerrillamail.com.key.pem",
             "public_key_file":"../../tests/mail2.guerrillamail.com.cert.pem",
             "public_key_file":"../../tests/mail2.guerrillamail.com.cert.pem",
             "timeout":180,
             "timeout":180,
-            "listen_interface":"127.0.0.1:25",
+            "listen_interface":"127.0.0.1:3536",
             "start_tls_on":true,
             "start_tls_on":true,
             "tls_always_on":false,
             "tls_always_on":false,
             "max_clients": 1000,
             "max_clients": 1000,
@@ -127,7 +128,10 @@ var configJsonC = `
             "redis_interface" : "127.0.0.1:6379",
             "redis_interface" : "127.0.0.1:6379",
             "redis_expire_seconds" : 7200,
             "redis_expire_seconds" : 7200,
             "save_workers_size" : 3,
             "save_workers_size" : 3,
-            "primary_mail_host":"sharklasers.com"
+            "primary_mail_host":"sharklasers.com",
+            "save_workers_size" : 1,
+	    "save_process": "HeadersParser|Debugger",
+	    "log_received_mails": true
         },
         },
     "servers" : [
     "servers" : [
         {
         {
@@ -173,8 +177,9 @@ var configJsonD = `
       "guerrillamail.net",
       "guerrillamail.net",
       "guerrillamail.org"
       "guerrillamail.org"
     ],
     ],
-    "backend_name": "dummy",
     "backend_config": {
     "backend_config": {
+        "save_workers_size" : 1,
+    	"save_process": "HeadersParser|Debugger",
         "log_received_mails": false
         "log_received_mails": false
     },
     },
     "servers" : [
     "servers" : [
@@ -208,6 +213,65 @@ var configJsonD = `
 }
 }
 `

+var configJsonE = `
+{
+    "log_file" : "../../tests/testlog",
+    "log_level" : "debug",
+    "pid_file" : "./pidfile2.pid",
+    "allowed_hosts": [
+      "guerrillamail.com",
+      "guerrillamailblock.com",
+      "sharklasers.com",
+      "guerrillamail.net",
+      "guerrillamail.org"
+    ],
+    "backend_config" :
+        {
+            "save_process_old": "HeadersParser|Debugger|Hasher|Header|Compressor|Redis|MySql",
+            "save_process": "GuerrillaRedisDB",
+            "log_received_mails" : true,
+            "mysql_db":"gmail_mail",
+            "mysql_host":"127.0.0.1:3306",
+            "mysql_pass":"secret",
+            "mysql_user":"root",
+            "mail_table":"new_mail",
+            "redis_interface" : "127.0.0.1:6379",
+             "redis_expire_seconds" : 7200,
+            "save_workers_size" : 3,
+            "primary_mail_host":"sharklasers.com"
+        },
+    "servers" : [
+        {
+            "is_enabled" : true,
+            "host_name":"mail.test.com",
+            "max_size": 1000000,
+            "private_key_file":"../..//tests/mail2.guerrillamail.com.key.pem",
+            "public_key_file":"../../tests/mail2.guerrillamail.com.cert.pem",
+            "timeout":180,
+            "listen_interface":"127.0.0.1:2552",
+            "start_tls_on":true,
+            "tls_always_on":false,
+            "max_clients": 1000,
+            "log_file" : "../../tests/testlog"
+        },
+        {
+            "is_enabled" : true,
+            "host_name":"secure.test.com",
+            "max_size":1000000,
+            "private_key_file":"../..//tests/mail2.guerrillamail.com.key.pem",
+            "public_key_file":"../../tests/mail2.guerrillamail.com.cert.pem",
+            "timeout":180,
+            "listen_interface":"127.0.0.1:4655",
+            "start_tls_on":false,
+            "tls_always_on":true,
+            "max_clients":500,
+            "log_file" : "../../tests/testlog"
+        }
+    ]
+}
+`
+
 const testPauseDuration = time.Millisecond * 600

 // reload config
@@ -243,59 +307,66 @@ func sigKill() {
 // make sure that we get all the config change events
 func TestCmdConfigChangeEvents(t *testing.T) {

-	oldconf := &CmdConfig{}
-	oldconf.load([]byte(configJsonA))
+	oldconf := &guerrilla.AppConfig{}
+	if err := oldconf.Load([]byte(configJsonA)); err != nil {
+		t.Error("configJsonA is invalid", err)
+	}

-	newconf := &CmdConfig{}
-	newconf.load([]byte(configJsonB))
+	newconf := &guerrilla.AppConfig{}
+	if err := newconf.Load([]byte(configJsonB)); err != nil {
+		t.Error("configJsonB is invalid", err)
+	}

-	newerconf := &CmdConfig{}
-	newerconf.load([]byte(configJsonC))
+	newerconf := &guerrilla.AppConfig{}
+	if err := newerconf.Load([]byte(configJsonC)); err != nil {
+		t.Error("configJsonC is invalid", err)
+	}

-	expectedEvents := map[string]bool{
-		"config_change:backend_config": false,
-		"config_change:backend_name":   false,
-		"server_change:new_server":     false,
+	expectedEvents := map[guerrilla.Event]bool{
+		guerrilla.EventConfigBackendConfig: false,
+		guerrilla.EventConfigServerNew:     false,
 	}
-	mainlog, _ = log.GetLogger("off")
+	mainlog, _ = log.GetLogger("../../tests/testlog")

 	bcfg := backends.BackendConfig{"log_received_mails": true}
-	backend, err := backends.New("dummy", bcfg, mainlog)
-	app, err := guerrilla.New(&oldconf.AppConfig, backend, mainlog)
+	backend, err := backends.New(bcfg, mainlog)
+	app, err := guerrilla.New(oldconf, backend, mainlog)
 	if err != nil {
 		//log.Info("Failed to create new app", err)
 	}
-	toUnsubscribe := map[string]func(c *CmdConfig){}
-	toUnsubscribeS := map[string]func(c *guerrilla.ServerConfig){}
+	toUnsubscribe := map[guerrilla.Event]func(c *guerrilla.AppConfig){}
+	toUnsubscribeS := map[guerrilla.Event]func(c *guerrilla.ServerConfig){}

 	for event := range expectedEvents {
 		// Put in anon func since range is overwriting event
-		func(e string) {
-
-			if strings.Index(e, "server_change") == 0 {
+		func(e guerrilla.Event) {
+			if strings.Index(e.String(), "server_change") == 0 {
 				f := func(c *guerrilla.ServerConfig) {
 					expectedEvents[e] = true
 				}
-				app.Subscribe(event, f)
-				toUnsubscribeS[event] = f
+				app.Subscribe(e, f)
+				toUnsubscribeS[e] = f
 			} else {
-				f := func(c *CmdConfig) {
+				f := func(c *guerrilla.AppConfig) {
 					expectedEvents[e] = true
 				}
-				app.Subscribe(event, f)
-				toUnsubscribe[event] = f
+				app.Subscribe(e, f)
+				toUnsubscribe[e] = f
 			}

 		}(event)
 	}

 	// emit events
-	newconf.emitChangeEvents(oldconf, app)
-	newerconf.emitChangeEvents(newconf, app)
+	newconf.EmitChangeEvents(oldconf, app)
+	newerconf.EmitChangeEvents(newconf, app)
 	// unsubscribe
 	for unevent, unfun := range toUnsubscribe {
 		app.Unsubscribe(unevent, unfun)
 	}
+	for unevent, unfun := range toUnsubscribeS {
+		app.Unsubscribe(unevent, unfun)
+	}

 	for event, val := range expectedEvents {
 		if val == false {
@@ -311,6 +382,7 @@ func TestCmdConfigChangeEvents(t *testing.T) {

 // start server, change config, send SIGHUP, confirm that the pidfile changed & backend reloaded
 func TestServe(t *testing.T) {
+	os.Truncate("../../tests/testlog", 0)
 	testcert.GenerateCert("mail2.guerrillamail.com", "", 365*24*time.Hour, false, 2048, "P256", "../../tests/")

 	mainlog, _ = log.GetLogger("../../tests/testlog")
@@ -344,12 +416,7 @@ func TestServe(t *testing.T) {
 	// Would not work on windows as kill is not available.
 	// TODO: Implement an alternative test for windows.
 	if runtime.GOOS != "windows" {
-		ecmd := exec.Command("kill", "-HUP", string(data))
-		_, err = ecmd.Output()
-		if err != nil {
-			t.Error("could not SIGHUP", err)
-			t.FailNow()
-		}
+		sigHup()
 		time.Sleep(testPauseDuration) // allow sighup to do its job
 		// did the pidfile change as expected?
 		if _, err := os.Stat("./pidfile2.pid"); os.IsNotExist(err) {
@@ -358,6 +425,7 @@ func TestServe(t *testing.T) {
 	}
 	// send kill signal and wait for exit
 	sigKill()
+	// wait for exit
 	serveWG.Wait()

 	// did the backend start as expected?
@@ -367,13 +435,13 @@ func TestServe(t *testing.T) {
 	}
 	if read, err := ioutil.ReadAll(fd); err == nil {
 		logOutput := string(read)
-		if i := strings.Index(logOutput, "Backend started:dummy"); i < 0 {
+		if i := strings.Index(logOutput, "new backend started"); i < 0 {
 			t.Error("Dummy backend not restarted")
 		}
 	}

 	// cleanup
-	os.Truncate("../../tests/testlog", 0)
+
 	os.Remove("configJsonA.json")
 	os.Remove("./pidfile.pid")
 	os.Remove("./pidfile2.pid")
@@ -649,7 +717,7 @@ func TestAllowedHostsEvent(t *testing.T) {

 	// now change the config by adding a host to allowed hosts

-	newConf := conf // copy the cmdConfg
+	newConf := conf
 	newConf.AllowedHosts = append(newConf.AllowedHosts, "grr.la")
 	if jsonbytes, err := json.Marshal(newConf); err == nil {
 		ioutil.WriteFile("configJsonD.json", []byte(jsonbytes), 0644)
@@ -689,7 +757,8 @@ func TestAllowedHostsEvent(t *testing.T) {
 		logOutput := string(read)
 		//fmt.Println(logOutput)
 		if i := strings.Index(logOutput, "allowed_hosts config changed, a new list was set"); i < 0 {
-			t.Error("did not change allowed_hosts, most likely because Bus.Subscribe(\"config_change:allowed_hosts\" didnt fire")
+			t.Errorf("did not change allowed_hosts, most likely because Bus.Subscribe(\"%s\" didnt fire",
+				guerrilla.EventConfigAllowedHosts)
 		}
 	}
 	// cleanup
@@ -805,20 +874,63 @@ func TestTLSConfigEvent(t *testing.T) {

 }

-// Test for missing TLS certificate, when starting or config reload
+// Testing starting a server with a bad TLS config
+// It should not start, return exit code 1
+func TestBadTLSStart(t *testing.T) {
+	// Need to run the test in a different process by executing a command
+	// because the serve() does os.Exit when starting with a bad TLS config
+	if os.Getenv("BE_CRASHER") == "1" {
+		// do the test
+		// first, remove the good certs, if any
+		if err := os.Remove("./../../tests/mail2.guerrillamail.com.cert.pem"); err != nil {
+			mainlog.WithError(err).Error("could not remove ./../../tests/mail2.guerrillamail.com.cert.pem")
+		} else {
+			mainlog.Info("removed ./../../tests/mail2.guerrillamail.com.cert.pem")
+		}
+		// next run the server
+		ioutil.WriteFile("configJsonD.json", []byte(configJsonD), 0644)
+		conf := &CmdConfig{}           // blank one
+		conf.load([]byte(configJsonD)) // load configJsonD
+
+		cmd := &cobra.Command{}
+		configPath = "configJsonD.json"
+		var serveWG sync.WaitGroup
+
+		serveWG.Add(1)
+		go func() {
+			serve(cmd, []string{})
+			serveWG.Done()
+		}()
+		time.Sleep(testPauseDuration)
+
+		sigKill()
+		serveWG.Wait()
+
+		return
+	}
+	cmd := exec.Command(os.Args[0], "-test.run=TestBadTLSStart")
+	cmd.Env = append(os.Environ(), "BE_CRASHER=1")
+	err := cmd.Run()
+	if e, ok := err.(*exec.ExitError); ok && !e.Success() {
+		return
+	}
+	t.Error("Server started with a bad TLS config, was expecting exit status 1")
+	// cleanup
+	os.Truncate("../../tests/testlog", 0)
+	os.Remove("configJsonD.json")
+	os.Remove("./pidfile.pid")
+}
 
 
-func TestBadTLS(t *testing.T) {
+// Test config reload with a bad TLS config
+// It should ignore the config reload, keep running with old settings
+func TestBadTLSReload(t *testing.T) {
 	mainlog, _ = log.GetLogger("../../tests/testlog")
-	if err := os.Remove("./../../tests/mail2.guerrillamail.com.cert.pem"); err != nil {
-		mainlog.WithError(err).Error("could not remove ./../../tests/mail2.guerrillamail.com.cert.pem")
-	} else {
-		mainlog.Info("removed ./../../tests/mail2.guerrillamail.com.cert.pem")
-	}
+	// start with a good cert
+	testcert.GenerateCert("mail2.guerrillamail.com", "", 365*24*time.Hour, false, 2048, "P256", "../../tests/")
 	// start the server by emulating the serve command
 	ioutil.WriteFile("configJsonD.json", []byte(configJsonD), 0644)
 	conf := &CmdConfig{}           // blank one
 	conf.load([]byte(configJsonD)) // load configJsonD
-	conf.Servers[0].Timeout = 1
 	cmd := &cobra.Command{}
 	configPath = "configJsonD.json"
 	var serveWG sync.WaitGroup
@@ -830,79 +942,67 @@ func TestBadTLS(t *testing.T) {
 	}()
 	time.Sleep(testPauseDuration)

-	// Test STARTTLS handshake
-	testTlsHandshake := func() {
-		if conn, buffin, err := test.Connect(conf.AppConfig.Servers[0], 20); err != nil {
-			t.Error("Could not connect to server", conf.AppConfig.Servers[0].ListenInterface, err)
-		} else {
-			conn.SetDeadline(time.Now().Add(time.Second))
-			if result, err := test.Command(conn, buffin, "HELO"); err == nil {
-				expect := "250 mail.test.com Hello"
-				if strings.Index(result, expect) != 0 {
-					t.Error("Expected", expect, "but got", result)
-				} else {
-					if result, err = test.Command(conn, buffin, "STARTTLS"); err == nil {
-						expect := "220 2.0.0 Ready to start TLS"
-						if strings.Index(result, expect) != 0 {
-							t.Error("Expected:", expect, "but got:", result)
-						} else {
-							tlsConn := tls.Client(conn, &tls.Config{
-								InsecureSkipVerify: true,
-								ServerName:         "127.0.0.1",
-							})
-							if err := tlsConn.Handshake(); err != nil {
-								mainlog.Info("TLS Handshake failed")
-							} else {
-								t.Error("Handshake succeeded, expected it to fail", conf.AppConfig.Servers[0].ListenInterface)
-								conn = tlsConn
-
-							}
-
-						}
-					}
-				}
+	if conn, buffin, err := test.Connect(conf.AppConfig.Servers[0], 20); err != nil {
+		t.Error("Could not connect to server", conf.AppConfig.Servers[0].ListenInterface, err)
+	} else {
+		if result, err := test.Command(conn, buffin, "HELO"); err == nil {
+			expect := "250 mail.test.com Hello"
+			if strings.Index(result, expect) != 0 {
+				t.Error("Expected", expect, "but got", result)
 			}
-			conn.Close()
 		}
 	}
-	testTlsHandshake()
-
 	// write some trash data
 	ioutil.WriteFile("./../../tests/mail2.guerrillamail.com.cert.pem", []byte("trash data"), 0664)
 	ioutil.WriteFile("./../../tests/mail2.guerrillamail.com.key.pem", []byte("trash data"), 0664)

-	// generate a new cert
-	//testcert.GenerateCert("mail2.guerrillamail.com", "", 365 * 24 * time.Hour, false, 2048, "P256", "../../tests/")
-	sigHup()
+	newConf := conf // copy the cmdConfig

+	if jsonbytes, err := json.Marshal(newConf); err == nil {
+		ioutil.WriteFile("configJsonD.json", []byte(jsonbytes), 0644)
+	} else {
+		t.Error(err)
+	}
+	// send a sighup signal to the server to reload config
+	sigHup()
 	time.Sleep(testPauseDuration) // pause for config to reload
-	testTlsHandshake()

-	time.Sleep(testPauseDuration)
-	// send kill signal and wait for exit
+	// we should still be able to talk to it
+
+	if conn, buffin, err := test.Connect(conf.AppConfig.Servers[0], 20); err != nil {
+		t.Error("Could not connect to server", conf.AppConfig.Servers[0].ListenInterface, err)
+	} else {
+		if result, err := test.Command(conn, buffin, "HELO"); err == nil {
+			expect := "250 mail.test.com Hello"
+			if strings.Index(result, expect) != 0 {
+				t.Error("Expected", expect, "but got", result)
+			}
+		}
+	}
+
 	sigKill()
 	serveWG.Wait()
-	// did backend started as expected?
+
+	// did config reload fail as expected?
 	fd, _ := os.Open("../../tests/testlog")
 	if read, err := ioutil.ReadAll(fd); err == nil {
 		logOutput := string(read)
 		//fmt.Println(logOutput)
-		if i := strings.Index(logOutput, "failed to load the new TLS configuration"); i < 0 {
-			t.Error("did not detect TLS load failure")
+		if i := strings.Index(logOutput, "cannot use TLS config for"); i < 0 {
+			t.Error("[127.0.0.1:2552] did not reject our tls config as expected")
 		}
 	}
 	// cleanup
 	os.Truncate("../../tests/testlog", 0)
 	os.Remove("configJsonD.json")
 	os.Remove("./pidfile.pid")
-
 }

 // Test for when the server config Timeout value changes
 // Start with configJsonD.json

 func TestSetTimeoutEvent(t *testing.T) {
-	//mainlog, _ = log.GetLogger("../../tests/testlog")
+	mainlog, _ = log.GetLogger("../../tests/testlog")
 	testcert.GenerateCert("mail2.guerrillamail.com", "", 365*24*time.Hour, false, 2048, "P256", "../../tests/")
 	// start the server by emulating the serve command
 	ioutil.WriteFile("configJsonD.json", []byte(configJsonD), 0644)
@@ -919,16 +1019,6 @@ func TestSetTimeoutEvent(t *testing.T) {
 	}()
 	time.Sleep(testPauseDuration)

-	if conn, buffin, err := test.Connect(conf.AppConfig.Servers[0], 20); err != nil {
-		t.Error("Could not connect to server", conf.AppConfig.Servers[0].ListenInterface, err)
-	} else {
-		if result, err := test.Command(conn, buffin, "HELO"); err == nil {
-			expect := "250 mail.test.com Hello"
-			if strings.Index(result, expect) != 0 {
-				t.Error("Expected", expect, "but got", result)
-			}
-		}
-	}
 	// set the timeout to 1 second

 	newConf := conf // copy the cmdConfig
@@ -938,9 +1028,32 @@ func TestSetTimeoutEvent(t *testing.T) {
 	} else {
 		t.Error(err)
 	}
+
 	// send a sighup signal to the server to reload config
 	sigHup()
-	time.Sleep(time.Millisecond * 1200) // pause for connection to timeout
+	time.Sleep(testPauseDuration) // config reload
+
+	var waitTimeout sync.WaitGroup
+	if conn, buffin, err := test.Connect(conf.AppConfig.Servers[0], 20); err != nil {
+		t.Error("Could not connect to server", conf.AppConfig.Servers[0].ListenInterface, err)
+	} else {
+		waitTimeout.Add(1)
+		go func() {
+			if result, err := test.Command(conn, buffin, "HELO"); err == nil {
+				expect := "250 mail.test.com Hello"
+				if strings.Index(result, expect) != 0 {
+					t.Error("Expected", expect, "but got", result)
+				} else {
+					b := make([]byte, 1024)
+					conn.Read(b)
+				}
+			}
+			waitTimeout.Done()
+		}()
+	}
+
+	// wait for timeout
+	waitTimeout.Wait()

 	// so the connection we have opened should timeout by now

@@ -967,7 +1080,7 @@ func TestSetTimeoutEvent(t *testing.T) {
 // Start in log_level = debug
 // Load config & start server
 func TestDebugLevelChange(t *testing.T) {
-	//mainlog, _ = log.GetLogger("../../tests/testlog")
+	mainlog, _ = log.GetLogger("../../tests/testlog")
 	testcert.GenerateCert("mail2.guerrillamail.com", "", 365*24*time.Hour, false, 2048, "P256", "../../tests/")
 	// start the server by emulating the serve command
 	ioutil.WriteFile("configJsonD.json", []byte(configJsonD), 0644)
@@ -1044,3 +1157,59 @@ func TestDebugLevelChange(t *testing.T) {
 	os.Remove("./pidfile.pid")
 	os.Remove("./pidfile.pid")
 
 
 }
 }
+
+// When reloading with a bad backend config, it should revert to old backend config
+func TestBadBackendReload(t *testing.T) {
+	testcert.GenerateCert("mail2.guerrillamail.com", "", 365*24*time.Hour, false, 2048, "P256", "../../tests/")
+
+	mainlog, _ = log.GetLogger("../../tests/testlog")
+
+	ioutil.WriteFile("configJsonA.json", []byte(configJsonA), 0644)
+	cmd := &cobra.Command{}
+	configPath = "configJsonA.json"
+	var serveWG sync.WaitGroup
+	serveWG.Add(1)
+	go func() {
+		serve(cmd, []string{})
+		serveWG.Done()
+	}()
+	time.Sleep(testPauseDuration)
+
+	// change the config file to the one with a broken backend
+	ioutil.WriteFile("configJsonA.json", []byte(configJsonE), 0644)
+
+	// test SIGHUP via the kill command
+	// Would not work on windows as kill is not available.
+	// TODO: Implement an alternative test for windows.
+	if runtime.GOOS != "windows" {
+		sigHup()
+		time.Sleep(testPauseDuration) // allow sighup to do its job
+		// did the pidfile change as expected?
+		if _, err := os.Stat("./pidfile2.pid"); os.IsNotExist(err) {
+			t.Error("pidfile not changed after SIGHUP", err)
+		}
+	}
+
+	// send kill signal and wait for exit
+	sigKill()
+	serveWG.Wait()
+
+	// did the backend start as expected?
+	fd, err := os.Open("../../tests/testlog")
+	if err != nil {
+		t.Error(err)
+	}
+	if read, err := ioutil.ReadAll(fd); err == nil {
+		logOutput := string(read)
+		if i := strings.Index(logOutput, "reverted to old backend config"); i < 0 {
+			t.Error("did not revert to old backend config")
+		}
+	}
+
+	// cleanup
+	os.Truncate("../../tests/testlog", 0)
+	os.Remove("configJsonA.json")
+	os.Remove("./pidfile.pid")
+	os.Remove("./pidfile2.pid")
+
+}

+ 225 - 38
config.go

@@ -1,6 +1,7 @@
 package guerrilla

 import (
+	"crypto/tls"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -8,76 +9,122 @@ import (
 	"reflect"
 	"reflect"
 	"strings"
 	"strings"
 
 
+	"github.com/flashmob/go-guerrilla/backends"
+	"github.com/flashmob/go-guerrilla/log"
+
 	"github.com/flashmob/go-guerrilla/dashboard"
 	"github.com/flashmob/go-guerrilla/dashboard"
 )
 )
 
 
 // AppConfig is the holder of the configuration of the app
 // AppConfig is the holder of the configuration of the app
 type AppConfig struct {
 type AppConfig struct {
-	Dashboard    dashboard.Config `json:"dashboard"`
-	Servers      []ServerConfig   `json:"servers"`
-	AllowedHosts []string         `json:"allowed_hosts"`
-	PidFile      string           `json:"pid_file"`
-	LogFile      string           `json:"log_file,omitempty"`
-	LogLevel     string           `json:"log_level,omitempty"`
+	// Servers can have one or more items.
+	/// Defaults to 1 server listening on 127.0.0.1:2525
+	// Defaults to 1 server listening on 127.0.0.1:2525
+	// AllowedHosts lists which hosts to accept email for. Defaults to os.Hostname
+	AllowedHosts []string `json:"allowed_hosts"`
+	// PidFile is the path for writing out the process id. No output if empty
+	PidFile string `json:"pid_file"`
+	// LogFile is where the logs go. Use path to file, or "stderr", "stdout"
+	// or "off". Default "stderr"
+	LogFile string `json:"log_file,omitempty"`
+	// LogLevel controls the lowest level we log.
+	// "info", "debug", "error", "panic". Default "info"
+	LogLevel string `json:"log_level,omitempty"`
+	// BackendConfig configures the email envelope processing backend
+	BackendConfig backends.BackendConfig `json:"backend_config"`
+	// Dashboard config configures how analytics are gathered and displayed
+	Dashboard dashboard.Config `json:"dashboard"`
 }
 }

 // ServerConfig specifies config options for a single server
 type ServerConfig struct {
-	Hostname        string `json:"host_name"`
-	MaxSize         int64  `json:"max_size"`
-	PrivateKeyFile  string `json:"private_key_file"`
-	PublicKeyFile   string `json:"public_key_file"`
-	Timeout         int    `json:"timeout"`
+	// IsEnabled set to true to start the server, false will ignore it
+	IsEnabled bool `json:"is_enabled"`
+	// Hostname will be used in the server's reply to HELO/EHLO. If TLS enabled
+	// make sure that the Hostname matches the cert. Defaults to os.Hostname()
+	Hostname string `json:"host_name"`
+	// MaxSize is the maximum size of an email that will be accepted for delivery.
+	// Defaults to 10 Mebibytes
+	MaxSize int64 `json:"max_size"`
+	// PrivateKeyFile path to cert private key in PEM format. Will be ignored if blank
+	PrivateKeyFile string `json:"private_key_file"`
+	// PublicKeyFile path to cert (public key) chain in PEM format.
+	// Will be ignored if blank
+	PublicKeyFile string `json:"public_key_file"`
+	// Timeout specifies the connection timeout in seconds. Defaults to 30
+	Timeout int `json:"timeout"`
+	// Listen interface specified in <ip>:<port> - defaults to 127.0.0.1:2525
 	ListenInterface string `json:"listen_interface"`
-	StartTLSOn      bool   `json:"start_tls_on,omitempty"`
-	TLSAlwaysOn     bool   `json:"tls_always_on,omitempty"`
-	MaxClients      int    `json:"max_clients"`
-	LogFile         string `json:"log_file,omitempty"`
+	// StartTLSOn should we offer STARTTLS command. Cert must be valid.
+	// False by default
+	StartTLSOn bool `json:"start_tls_on,omitempty"`
+	// TLSAlwaysOn run this server as a pure TLS server, i.e. SMTPS
+	TLSAlwaysOn bool `json:"tls_always_on,omitempty"`
+	// MaxClients controls how many maxiumum clients we can handle at once.
+	// Defaults to 100
+	MaxClients int `json:"max_clients"`
+	// LogFile is where the logs go. Use path to file, or "stderr", "stdout" or "off".
+	// defaults to AppConfig.Log file setting
+	LogFile string `json:"log_file,omitempty"`
 
 
+	// The following used to watch certificate changes so that the TLS can be reloaded
 	_privateKeyFile_mtime int
 	_privateKeyFile_mtime int
 	_publicKeyFile_mtime  int
 	_publicKeyFile_mtime  int
 }
 }
 
 
 // Unmarshalls json data into AppConfig struct and any other initialization of the struct
 // Unmarshals json data into AppConfig struct and any other initialization of the struct
+// also does validation, returns error if validation failed or something went wrong
 func (c *AppConfig) Load(jsonBytes []byte) error {
 	err := json.Unmarshal(jsonBytes, c)
 	if err != nil {
 		return fmt.Errorf("could not parse config file: %s", err)
 	}
-	if len(c.AllowedHosts) == 0 {
-		return errors.New("empty AllowedHosts is not allowed")
+	if err = c.setDefaults(); err != nil {
+		return err
+	}
+	if err = c.setBackendDefaults(); err != nil {
+		return err
+	}
+
+	// all servers must be valid in order to continue
+	for _, server := range c.Servers {
+		if errs := server.Validate(); errs != nil {
+			return errs
+		}
 	}

 	// read the timestamps for the ssl keys, to determine if they need to be reloaded
 	for i := 0; i < len(c.Servers); i++ {
-		if err := c.Servers[i].loadTlsKeyTimestamps(); err != nil {
-			return err
-		}
+		c.Servers[i].loadTlsKeyTimestamps()
 	}
 	return nil
 }

 // Emits any configuration change events onto the event bus.
 func (c *AppConfig) EmitChangeEvents(oldConfig *AppConfig, app Guerrilla) {
+	// has backend changed?
+	if !reflect.DeepEqual((*c).BackendConfig, (*oldConfig).BackendConfig) {
+		app.Publish(EventConfigBackendConfig, c)
+	}
+	// has config changed, general check
+	if !reflect.DeepEqual(oldConfig, c) {
+		app.Publish(EventConfigNewConfig, c)
+	}
 	// has 'allowed hosts' changed?
 	if !reflect.DeepEqual(oldConfig.AllowedHosts, c.AllowedHosts) {
-		app.Publish("config_change:allowed_hosts", c)
+		app.Publish(EventConfigAllowedHosts, c)
 	}
 	// has pid file changed?
 	if strings.Compare(oldConfig.PidFile, c.PidFile) != 0 {
-		app.Publish("config_change:pid_file", c)
+		app.Publish(EventConfigPidFile, c)
 	}
 	// has mainlog log changed?
 	if strings.Compare(oldConfig.LogFile, c.LogFile) != 0 {
-		app.Publish("config_change:log_file", c)
-	} else {
-		// since config file has not changed, we reload it
-		app.Publish("config_change:reopen_log_file", c)
+		app.Publish(EventConfigLogFile, c)
 	}
 	// has log level changed?
 	if strings.Compare(oldConfig.LogLevel, c.LogLevel) != 0 {
-		app.Publish("config_change:log_level", c)
+		app.Publish(EventConfigLogLevel, c)
 	}
 	// server config changes
 	oldServers := oldConfig.getServers()
@@ -86,16 +133,25 @@ func (c *AppConfig) EmitChangeEvents(oldConfig *AppConfig, app Guerrilla) {
 		if oldServer, ok := oldServers[iface]; ok {
 			// since old server exists in the new config, we do not track it anymore
 			delete(oldServers, iface)
+			// so we know the server exists in both old & new configs
 			newServer.emitChangeEvents(oldServer, app)
 		} else {
 			// start new server
-			app.Publish("server_change:new_server", newServer)
+			app.Publish(EventConfigServerNew, newServer)
 		}

 	}
 	// remove any servers that don't exist anymore
 	for _, oldserver := range oldServers {
-		app.Publish("server_change:remove_server", oldserver)
+		app.Publish(EventConfigServerRemove, oldserver)
+	}
+}
+
+// EmitLogReopenEvents emits log reopen events using existing config
+func (c *AppConfig) EmitLogReopenEvents(app Guerrilla) {
+	app.Publish(EventConfigLogReopen, c)
+	for _, sc := range c.getServers() {
+		app.Publish(EventConfigServerLogReopen, sc)
 	}
 }

@@ -108,6 +164,114 @@ func (c *AppConfig) getServers() map[string]*ServerConfig {
 	return servers
 }

+// setDefaults fills in default server settings for values that were not configured
+// The defaults are:
+// * Server listening to 127.0.0.1:2525
+// * use your hostname to determine which hosts to accept email for
+// * 100 maximum clients
+// * 10MB max message size
+// * log to Stderr,
+// * log level set to "`debug`"
+// * timeout to 30 sec
+// * Backend configured with the following processors: `HeadersParser|Header|Debugger`
+// where it will log the received emails.
+func (c *AppConfig) setDefaults() error {
+	if c.LogFile == "" {
+		c.LogFile = log.OutputStderr.String()
+	}
+	if c.LogLevel == "" {
+		c.LogLevel = "debug"
+	}
+	if len(c.AllowedHosts) == 0 {
+		if h, err := os.Hostname(); err != nil {
+			return err
+		} else {
+			c.AllowedHosts = append(c.AllowedHosts, h)
+		}
+	}
+	h, err := os.Hostname()
+	if err != nil {
+		return err
+	}
+	if len(c.Servers) == 0 {
+		sc := ServerConfig{}
+		sc.LogFile = c.LogFile
+		sc.ListenInterface = defaultInterface
+		sc.IsEnabled = true
+		sc.Hostname = h
+		sc.MaxClients = 100
+		sc.Timeout = 30
+		sc.MaxSize = 10 << 20 // 10 Mebibytes
+		c.Servers = append(c.Servers, sc)
+	} else {
+		// make sure each server has defaults correctly configured
+		for i := range c.Servers {
+			if c.Servers[i].Hostname == "" {
+				c.Servers[i].Hostname = h
+			}
+			if c.Servers[i].MaxClients == 0 {
+				c.Servers[i].MaxClients = 100
+			}
+			if c.Servers[i].Timeout == 0 {
+				c.Servers[i].Timeout = 30
+			}
+			if c.Servers[i].MaxSize == 0 {
+				c.Servers[i].MaxSize = 10 << 20 // 10 Mebibytes
+			}
+			if c.Servers[i].ListenInterface == "" {
+				return fmt.Errorf("Listen interface not specified for server at index %d", i)
+			}
+			if c.Servers[i].LogFile == "" {
+				c.Servers[i].LogFile = c.LogFile
+			}
+			// validate the server config
+			err = c.Servers[i].Validate()
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// setBackendDefaults sets default values for the backend config.
+// If no backend config was added before starting, a default config is used;
+// otherwise, any required values missing from the config are filled in with defaults.
+func (c *AppConfig) setBackendDefaults() error {
+
+	if len(c.BackendConfig) == 0 {
+		h, err := os.Hostname()
+		if err != nil {
+			return err
+		}
+		c.BackendConfig = backends.BackendConfig{
+			"log_received_mails": true,
+			"save_workers_size":  1,
+			"save_process":       "HeadersParser|Header|Debugger",
+			"primary_mail_host":  h,
+		}
+	} else {
+		if _, ok := c.BackendConfig["save_process"]; !ok {
+			c.BackendConfig["save_process"] = "HeadersParser|Header|Debugger"
+		}
+		if _, ok := c.BackendConfig["primary_mail_host"]; !ok {
+			h, err := os.Hostname()
+			if err != nil {
+				return err
+			}
+			c.BackendConfig["primary_mail_host"] = h
+		}
+		if _, ok := c.BackendConfig["save_workers_size"]; !ok {
+			c.BackendConfig["save_workers_size"] = 1
+		}
+
+		if _, ok := c.BackendConfig["log_received_mails"]; !ok {
+			c.BackendConfig["log_received_mails"] = false
+		}
+	}
+	return nil
+}
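
For orientation, a minimal sketch of how these defaults come into play when embedding the package. It assumes, as the config_test.go changes further below suggest, that AppConfig.Load invokes setDefaults and setBackendDefaults internally; the JSON literal is hypothetical:

    cfg := &guerrilla.AppConfig{}
    // a deliberately sparse config: no servers, no backend_config
    if err := cfg.Load([]byte(`{"allowed_hosts": ["example.com"]}`)); err != nil {
        log.Fatal(err)
    }
    // the defaults described above should now be filled in:
    // one server on 127.0.0.1:2525 (100 max clients, 10MB max size),
    // and a backend running "HeadersParser|Header|Debugger" with 1 save worker
    fmt.Println(cfg.Servers[0].ListenInterface, cfg.BackendConfig["save_process"])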
+
 // Emits any configuration change events on the server.
 // All events are fired and run synchronously.
 func (sc *ServerConfig) emitChangeEvents(oldServer *ServerConfig, app Guerrilla) {
@@ -118,33 +282,33 @@ func (sc *ServerConfig) emitChangeEvents(oldServer *ServerConfig, app Guerrilla)
 	)
 	if len(changes) > 0 {
 		// something changed in the server config
-		app.Publish("server_change:update_config", sc)
+		app.Publish(EventConfigServerConfig, sc)
 	}

 	// enable or disable?
 	if _, ok := changes["IsEnabled"]; ok {
 		if sc.IsEnabled {
-			app.Publish("server_change:start_server", sc)
+			app.Publish(EventConfigServerStart, sc)
 		} else {
-			app.Publish("server_change:stop_server", sc)
+			app.Publish(EventConfigServerStop, sc)
 		}
 		// do not emit any more events when IsEnabled changed
 		return
 	}
 	// log file change?
 	if _, ok := changes["LogFile"]; ok {
-		app.Publish("server_change:new_log_file", sc)
+		app.Publish(EventConfigServerLogFile, sc)
 	} else {
 		// since the log file has not changed, we re-open it
-		app.Publish("server_change:reopen_log_file", sc)
+		app.Publish(EventConfigServerLogReopen, sc)
 	}
 	// timeout changed
 	if _, ok := changes["Timeout"]; ok {
-		app.Publish("server_change:timeout", sc)
+		app.Publish(EventConfigServerTimeout, sc)
 	}
 	// max_clients changed
 	if _, ok := changes["MaxClients"]; ok {
-		app.Publish("server_change:max_clients", sc)
+		app.Publish(EventConfigServerMaxClients, sc)
 	}

 	// tls changed
@@ -163,7 +327,7 @@ func (sc *ServerConfig) emitChangeEvents(oldServer *ServerConfig, app Guerrilla)
 		}
 		return false
 	}(); ok {
-		app.Publish("server_change:tls_config", sc)
+		app.Publish(EventConfigServerTLSConfig, sc)
 	}
 }

@@ -195,6 +359,29 @@ func (sc *ServerConfig) getTlsKeyTimestamps() (int, int) {
 	return sc._privateKeyFile_mtime, sc._publicKeyFile_mtime
 }

+// Validate validates the server's configuration.
+func (sc *ServerConfig) Validate() error {
+	var errs Errors
+
+	if sc.StartTLSOn || sc.TLSAlwaysOn {
+		if sc.PublicKeyFile == "" {
+			errs = append(errs, errors.New("PublicKeyFile is empty"))
+		}
+		if sc.PrivateKeyFile == "" {
+			errs = append(errs, errors.New("PrivateKeyFile is empty"))
+		}
+		if _, err := tls.LoadX509KeyPair(sc.PublicKeyFile, sc.PrivateKeyFile); err != nil {
+			errs = append(errs,
+				fmt.Errorf("cannot use TLS config for [%s], %v", sc.ListenInterface, err))
+		}
+	}
+	if len(errs) > 0 {
+		return errs
+	}
+
+	return nil
+}
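
A quick sketch of how Validate aggregates problems (the values are hypothetical): with TLS required but no key pair configured, the returned Errors value carries every failure at once:

    sc := &guerrilla.ServerConfig{ListenInterface: "127.0.0.1:465", TLSAlwaysOn: true}
    if err := sc.Validate(); err != nil {
        // reports "PublicKeyFile is empty", "PrivateKeyFile is empty",
        // and the tls.LoadX509KeyPair failure together in one Errors value
        fmt.Println(err)
    }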
+
 // Returns a diff between struct a & struct b.
 // Results are returned in a map, where each key is the name of the field that was different.
 // a and b are struct values; they must not be pointers.

+ 38 - 32
config_test.go

@@ -22,9 +22,8 @@ var configJsonA = `
 {
     "log_file" : "./tests/testlog",
     "log_level" : "debug",
-    "pid_file" : "/var/run/go-guerrilla.pid",
+    "pid_file" : "tests/go-guerrilla.pid",
     "allowed_hosts": ["spam4.me","grr.la"],
-    "backend_name" : "dummy",
     "backend_config" :
         {
             "log_received_mails" : true
@@ -38,7 +37,7 @@ var configJsonA = `
             "public_key_file":"config_test.go",
             "timeout":160,
             "listen_interface":"127.0.0.1:2526",
-            "start_tls_on":true,
+            "start_tls_on":false,
             "tls_always_on":false,
             "max_clients": 2
         },
@@ -64,7 +63,7 @@ var configJsonA = `
             "public_key_file":"config_test.go",
             "timeout":160,
             "listen_interface":"127.0.0.1:9999",
-            "start_tls_on":true,
+            "start_tls_on":false,
             "tls_always_on":false,
             "max_clients": 2
         },
@@ -77,7 +76,7 @@ var configJsonA = `
             "public_key_file":"config_test.go",
             "timeout":160,
             "listen_interface":"127.0.0.1:3333",
-            "start_tls_on":true,
+            "start_tls_on":false,
             "tls_always_on":false,
             "max_clients": 2
         }
@@ -96,9 +95,8 @@ var configJsonB = `
 {
     "log_file" : "./tests/testlog",
     "log_level" : "debug",
-    "pid_file" : "/var/run/different-go-guerrilla.pid",
+    "pid_file" : "tests/different-go-guerrilla.pid",
     "allowed_hosts": ["spam4.me","grr.la","newhost.com"],
-    "backend_name" : "dummy",
     "backend_config" :
         {
             "log_received_mails" : true
@@ -126,6 +124,7 @@ var configJsonB = `
             "listen_interface":"127.0.0.1:2527",
             "start_tls_on":true,
             "tls_always_on":false,
+            "log_file" : "./tests/testlog",
             "max_clients": 2
         },

@@ -138,7 +137,7 @@ var configJsonB = `
             "timeout":180,
             "listen_interface":"127.0.0.1:4654",
             "start_tls_on":false,
-            "tls_always_on":true,
+            "tls_always_on":false,
             "max_clients":1
         },

@@ -182,7 +181,7 @@ func TestSampleConfig(t *testing.T) {
 		ac := &AppConfig{}
 		if err := ac.Load(jsonBytes); err != nil {
 			// sample config can have broken tls certs
-			if strings.Index(err.Error(), "could not stat key") != 0 {
+			if strings.Index(err.Error(), "cannot use TLS config for [127.0.0.1:25") != 0 {
 				t.Error("Cannot load config", fileName, "|", err)
 				t.FailNow()
 			}
@@ -199,39 +198,46 @@ func TestConfigChangeEvents(t *testing.T) {
 	oldconf.Load([]byte(configJsonA))
 	logger, _ := log.GetLogger(oldconf.LogFile)
 	bcfg := backends.BackendConfig{"log_received_mails": true}
-	backend, _ := backends.New("dummy", bcfg, logger)
-	app, _ := New(oldconf, backend, logger)
+	backend, err := backends.New(bcfg, logger)
+	if err != nil {
+		t.Error("cannot create backend", err)
+	}
+	app, err := New(oldconf, backend, logger)
+	if err != nil {
+		t.Error("cannot create daemon", err)
+	}
 	// simulate timestamp change
+
 	time.Sleep(time.Second + time.Millisecond*500)
 	os.Chtimes(oldconf.Servers[1].PrivateKeyFile, time.Now(), time.Now())
 	os.Chtimes(oldconf.Servers[1].PublicKeyFile, time.Now(), time.Now())
 	newconf := &AppConfig{}
 	newconf.Load([]byte(configJsonB))
-	newconf.Servers[0].LogFile = "/dev/stderr" // test for log file change
-	newconf.LogLevel = "off"
+	newconf.Servers[0].LogFile = "off" // test for log file change
+	newconf.LogLevel = "info"
 	newconf.LogFile = "off"
-	expectedEvents := map[string]bool{
-		"config_change:pid_file":        false,
-		"config_change:log_file":        false,
-		"config_change:log_level":       false,
-		"config_change:allowed_hosts":   false,
-		"server_change:new_server":      false, // 127.0.0.1:4654 will be added
-		"server_change:remove_server":   false, // 127.0.0.1:9999 server removed
-		"server_change:stop_server":     false, // 127.0.0.1:3333: server (disabled)
-		"server_change:new_log_file":    false, // 127.0.0.1:2526
-		"server_change:reopen_log_file": false, // 127.0.0.1:2527
-		"server_change:timeout":         false, // 127.0.0.1:2526 timeout
+	expectedEvents := map[Event]bool{
+		EventConfigPidFile:         false,
+		EventConfigLogFile:         false,
+		EventConfigLogLevel:        false,
+		EventConfigAllowedHosts:    false,
+		EventConfigServerNew:       false, // 127.0.0.1:4654 will be added
+		EventConfigServerRemove:    false, // 127.0.0.1:9999 server removed
+		EventConfigServerStop:      false, // 127.0.0.1:3333: server (disabled)
+		EventConfigServerLogFile:   false, // 127.0.0.1:2526
+		EventConfigServerLogReopen: false, // 127.0.0.1:2527
+		EventConfigServerTimeout:   false, // 127.0.0.1:2526 timeout
 		//"server_change:tls_config":    false, // 127.0.0.1:2526
-		"server_change:max_clients": false, // 127.0.0.1:2526
-		"server_change:tls_config":  false, // 127.0.0.1:2527 timestamp changed on certificates
+		EventConfigServerMaxClients: false, // 127.0.0.1:2526
+		EventConfigServerTLSConfig:  false, // 127.0.0.1:2527 timestamp changed on certificates
 	}
-	toUnsubscribe := map[string]func(c *AppConfig){}
-	toUnsubscribeS := map[string]func(c *ServerConfig){}
+	toUnsubscribe := map[Event]func(c *AppConfig){}
+	toUnsubscribeSrv := map[Event]func(c *ServerConfig){}

 	for event := range expectedEvents {
 		// Put in anon func since range is overwriting event
-		func(e string) {
-			if strings.Index(e, "config_change") != -1 {
+		func(e Event) {
+			if strings.Index(e.String(), "config_change") != -1 {
 				f := func(c *AppConfig) {
 					expectedEvents[e] = true
 				}
@@ -243,7 +249,7 @@ func TestConfigChangeEvents(t *testing.T) {
 					expectedEvents[e] = true
 				}
 				app.Subscribe(event, f)
-				toUnsubscribeS[event] = f
+				toUnsubscribeSrv[event] = f
 			}

 		}(event)
@@ -255,7 +261,7 @@ func TestConfigChangeEvents(t *testing.T) {
 	for unevent, unfun := range toUnsubscribe {
 		app.Unsubscribe(unevent, unfun)
 	}
-	for unevent, unfun := range toUnsubscribeS {
+	for unevent, unfun := range toUnsubscribeSrv {
 		app.Unsubscribe(unevent, unfun)
 	}
 	for event, val := range expectedEvents {

+ 11 - 7
dashboard/dashboard.go

@@ -1,6 +1,7 @@
 package dashboard

 import (
+	"fmt"
 	"math/rand"
 	"net/http"
 	"time"
@@ -91,21 +92,24 @@ func applyConfig(c *Config) {
 }

 func webSocketHandler(w http.ResponseWriter, r *http.Request) {
+	var sess *session
 	cookie, err := r.Cookie("SID")
+	fmt.Println("cookie", cookie, err) // debug: err may be nil here, so don't call err.Error()
 	if err != nil {
-		// TODO error
-		w.WriteHeader(http.StatusInternalServerError)
-	}
-	sess, sidExists := sessions[cookie.Value]
-	if !sidExists {
-		// No SID cookie
+		// Haven't set this cookie yet.
 		sess = startSession(w, r)
+	} else {
+		var sidExists bool
+		sess, sidExists = sessions[cookie.Value]
+		if !sidExists {
+			// No SID cookie in our store, start a new session
+			sess = startSession(w, r)
+		}
 	}

 	conn, err := upgrader.Upgrade(w, r, nil)
 	if err != nil {
 		w.WriteHeader(http.StatusInternalServerError)
-		// TODO Internal error
 		return
 	}
 	sess.ws = conn

+ 0 - 187
envelope/envelope.go

@@ -1,187 +0,0 @@
-package envelope
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"net/textproto"
-	"regexp"
-	"strings"
-
-	"github.com/sloonz/go-qprintable"
-	"gopkg.in/iconv.v1"
-)
-
-// EmailAddress encodes an email address of the form `<user@host>`
-type EmailAddress struct {
-	User string
-	Host string
-}
-
-func (ep *EmailAddress) String() string {
-	return fmt.Sprintf("%s@%s", ep.User, ep.Host)
-}
-
-func (ep *EmailAddress) IsEmpty() bool {
-	return ep.User == "" && ep.Host == ""
-}
-
-// Envelope represents a single SMTP message.
-type Envelope struct {
-	// Remote IP address
-	RemoteAddress string
-	// Message sent in EHLO command
-	Helo string
-	// Sender
-	MailFrom EmailAddress
-	// Recipients
-	RcptTo []EmailAddress
-	// Data stores the header and message body
-	Data bytes.Buffer
-	// Subject stores the subject of the email, extracted and decoded after calling ParseHeaders()
-	Subject string
-	// TLS is true if the email was received using a TLS connection
-	TLS bool
-	// Header stores the results from ParseHeaders()
-	Header textproto.MIMEHeader
-}
-
-// ParseHeaders parses the headers into Header field of the Envelope struct.
-// Data buffer must be full before calling.
-// It assumes that at most 30kb of email data can be a header
-// Decoding of encoding to UTF is only done on the Subject, where the result is assigned to the Subject field
-func (e *Envelope) ParseHeaders() error {
-	var err error
-	if e.Header != nil {
-		return errors.New("Headers already parsed")
-	}
-	all := e.Data.Bytes()
-
-	// find where the header ends, assuming that over 30 kb would be max
-	max := 1024 * 30
-	if len(all) < max {
-		max = len(all) - 1
-	}
-	headerEnd := bytes.Index(all[:max], []byte("\n\n"))
-
-	if headerEnd > -1 {
-		headerReader := textproto.NewReader(bufio.NewReader(bytes.NewBuffer(all[0:headerEnd])))
-		e.Header, err = headerReader.ReadMIMEHeader()
-		if err != nil {
-			// decode the subject
-			if subject, ok := e.Header["Subject"]; ok {
-				e.Subject = MimeHeaderDecode(subject[0])
-			}
-		}
-	} else {
-		err = errors.New("header not found")
-	}
-	return err
-}
-
-var mimeRegex, _ = regexp.Compile(`=\?(.+?)\?([QBqp])\?(.+?)\?=`)
-
-// Decode strings in Mime header format
-// eg. =?ISO-2022-JP?B?GyRCIVo9dztSOWJAOCVBJWMbKEI=?=
-func MimeHeaderDecode(str string) string {
-
-	matched := mimeRegex.FindAllStringSubmatch(str, -1)
-	var charset, encoding, payload string
-	if matched != nil {
-		for i := 0; i < len(matched); i++ {
-			if len(matched[i]) > 2 {
-				charset = matched[i][1]
-				encoding = strings.ToUpper(matched[i][2])
-				payload = matched[i][3]
-				switch encoding {
-				case "B":
-					str = strings.Replace(
-						str,
-						matched[i][0],
-						MailTransportDecode(payload, "base64", charset),
-						1)
-				case "Q":
-					str = strings.Replace(
-						str,
-						matched[i][0],
-						MailTransportDecode(payload, "quoted-printable", charset),
-						1)
-				}
-			}
-		}
-	}
-	return str
-}
-
-// decode from 7bit to 8bit UTF-8
-// encodingType can be "base64" or "quoted-printable"
-func MailTransportDecode(str string, encodingType string, charset string) string {
-	if charset == "" {
-		charset = "UTF-8"
-	} else {
-		charset = strings.ToUpper(charset)
-	}
-	if encodingType == "base64" {
-		str = fromBase64(str)
-	} else if encodingType == "quoted-printable" {
-		str = fromQuotedP(str)
-	}
-
-	if charset != "UTF-8" {
-		charset = fixCharset(charset)
-		// iconv is pretty good at what it does
-		if cd, err := iconv.Open("UTF-8", charset); err == nil {
-			defer func() {
-				cd.Close()
-				if r := recover(); r != nil {
-					//logln(1, fmt.Sprintf("Recovered in %v", r))
-				}
-			}()
-			// eg. charset can be "ISO-2022-JP"
-			return cd.ConvString(str)
-		}
-
-	}
-	return str
-}
-
-func fromBase64(data string) string {
-	buf := bytes.NewBufferString(data)
-	decoder := base64.NewDecoder(base64.StdEncoding, buf)
-	res, _ := ioutil.ReadAll(decoder)
-	return string(res)
-}
-
-func fromQuotedP(data string) string {
-	buf := bytes.NewBufferString(data)
-	decoder := qprintable.NewDecoder(qprintable.BinaryEncoding, buf)
-	res, _ := ioutil.ReadAll(decoder)
-	return string(res)
-}
-
-var charsetRegex, _ = regexp.Compile(`[_:.\/\\]`)
-
-func fixCharset(charset string) string {
-	fixed_charset := charsetRegex.ReplaceAllString(charset, "-")
-	// Fix charset
-	// borrowed from http://squirrelmail.svn.sourceforge.net/viewvc/squirrelmail/trunk/squirrelmail/include/languages.php?revision=13765&view=markup
-	// OE ks_c_5601_1987 > cp949
-	fixed_charset = strings.Replace(fixed_charset, "ks-c-5601-1987", "cp949", -1)
-	// Moz x-euc-tw > euc-tw
-	fixed_charset = strings.Replace(fixed_charset, "x-euc", "euc", -1)
-	// Moz x-windows-949 > cp949
-	fixed_charset = strings.Replace(fixed_charset, "x-windows_", "cp", -1)
-	// windows-125x and cp125x charsets
-	fixed_charset = strings.Replace(fixed_charset, "windows-", "cp", -1)
-	// ibm > cp
-	fixed_charset = strings.Replace(fixed_charset, "ibm", "cp", -1)
-	// iso-8859-8-i -> iso-8859-8
-	fixed_charset = strings.Replace(fixed_charset, "iso-8859-8-i", "iso-8859-8", -1)
-	if charset != fixed_charset {
-		return fixed_charset
-	}
-	return charset
-}

+ 87 - 0
event.go

@@ -0,0 +1,87 @@
+package guerrilla
+
+import (
+	evbus "github.com/asaskevich/EventBus"
+)
+
+type Event int
+
+const (
+	// when a new config was loaded
+	EventConfigNewConfig Event = iota
+	// when allowed_hosts changed
+	EventConfigAllowedHosts
+	// when pid_file changed
+	EventConfigPidFile
+	// when log_file changed
+	EventConfigLogFile
+	// when it's time to reload the main log file
+	EventConfigLogReopen
+	// when log level changed
+	EventConfigLogLevel
+	// when the backend's config changed
+	EventConfigBackendConfig
+	// when a new server was added
+	EventConfigServerNew
+	// when an existing server was removed
+	EventConfigServerRemove
+	// when a new server config was detected (general event)
+	EventConfigServerConfig
+	// when a server was enabled
+	EventConfigServerStart
+	// when a server was disabled
+	EventConfigServerStop
+	// when a server's log file changed
+	EventConfigServerLogFile
+	// when it's time to reload the server's log
+	EventConfigServerLogReopen
+	// when a server's timeout changed
+	EventConfigServerTimeout
+	// when a server's max clients changed
+	EventConfigServerMaxClients
+	// when a server's TLS config changed
+	EventConfigServerTLSConfig
+)
+
+var eventList = [...]string{
+	"config_change:new_config",
+	"config_change:allowed_hosts",
+	"config_change:pid_file",
+	"config_change:log_file",
+	"config_change:reopen_log_file",
+	"config_change:log_level",
+	"config_change:backend_config",
+	"server_change:new_server",
+	"server_change:remove_server",
+	"server_change:update_config",
+	"server_change:start_server",
+	"server_change:stop_server",
+	"server_change:new_log_file",
+	"server_change:reopen_log_file",
+	"server_change:timeout",
+	"server_change:max_clients",
+	"server_change:tls_config",
+}
+
+func (e Event) String() string {
+	return eventList[e]
+}
+
+type EventHandler struct {
+	*evbus.EventBus
+}
+
+func (h *EventHandler) Subscribe(topic Event, fn interface{}) error {
+	if h.EventBus == nil {
+		h.EventBus = evbus.New()
+	}
+	return h.EventBus.Subscribe(topic.String(), fn)
+}
+
+func (h *EventHandler) Publish(topic Event, args ...interface{}) {
+	h.EventBus.Publish(topic.String(), args...)
+}
+
+func (h *EventHandler) Unsubscribe(topic Event, handler interface{}) error {
+	return h.EventBus.Unsubscribe(topic.String(), handler)
+}
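
To illustrate how embedding code is expected to consume these typed events, a minimal sketch (the handler body and variable names are hypothetical; Publish passes a *AppConfig for config_change events and a *ServerConfig for server_change events, so the handler signature must match):

    d, _ := guerrilla.New(conf, backend, logger)
    _ = d.Subscribe(guerrilla.EventConfigAllowedHosts, func(c *guerrilla.AppConfig) {
        // runs synchronously whenever allowed_hosts changes
        fmt.Println("allowed hosts now:", c.AllowedHosts)
    })
    // later, after re-reading the config file:
    newConf.EmitChangeEvents(oldConf, d)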

+ 37 - 0
glide.lock

@@ -0,0 +1,37 @@
+hash: f50310b578ee0843edaa21515f34e04d659a39b4d2ee77cc5046152ba5c93003
+updated: 2017-03-12T16:16:53.851633642-07:00
+imports:
+- name: github.com/asaskevich/EventBus
+  version: ab9e5ceb2cc1ca6f36a5813c928c534e837681c2
+- name: github.com/garyburd/redigo
+  version: 8873b2f1995f59d4bcdd2b0dc9858e2cb9bf0c13
+  subpackages:
+  - internal
+  - redis
+- name: github.com/go-sql-driver/mysql
+  version: a0583e0143b1624142adab07e0e97fe106d99561
+- name: github.com/gorilla/context
+  version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
+- name: github.com/gorilla/mux
+  version: 599cba5e7b6137d46ddf58fb1765f5d928e69604
+- name: github.com/gorilla/websocket
+  version: b258b4fadb573ac412f187b9f31974ea99d32f50
+- name: github.com/inconshreveable/mousetrap
+  version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+- name: github.com/rakyll/statik
+  version: 274df120e9065bdd08eb1120e0375e3dc1ae8465
+  subpackages:
+  - fs
+- name: github.com/Sirupsen/logrus
+  version: 0208149b40d863d2c1a2f8fe5753096a9cf2cc8b
+- name: github.com/spf13/cobra
+  version: 16c014f1a19d865b765b420e74508f80eb831ada
+- name: github.com/spf13/pflag
+  version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
+- name: golang.org/x/sys
+  version: 478fcf54317e52ab69f40bb4c7a1520288d7f7ea
+  subpackages:
+  - unix
+- name: gopkg.in/iconv.v1
+  version: 16a760eb7e186ae0e3aedda00d4a1daa4d0701d8
+testImports: []

+ 0 - 6
glide.yaml

@@ -6,13 +6,7 @@ import:
   version: ~1.0.0
   subpackages:
   - redis
-- package: github.com/sloonz/go-qprintable
 - package: github.com/spf13/cobra
-- package: github.com/ziutek/mymysql
-  version: ~1.5.4
-  subpackages:
-  - autorc
-  - godrv
 - package: gopkg.in/iconv.v1
   version: ~1.1.1
 - package: github.com/rakyll/statik

+ 4 - 2
goguerrilla.conf.sample

@@ -9,9 +9,11 @@
       "guerrillamail.org"
       "guerrillamail.org"
     ],
     ],
     "pid_file" : "/var/run/go-guerrilla.pid",
     "pid_file" : "/var/run/go-guerrilla.pid",
-    "backend_name": "dummy",
     "backend_config": {
     "backend_config": {
-        "log_received_mails": true
+        "log_received_mails": true,
+        "save_workers_size": 1,
+        "save_process" : "HeadersParser|Header|Debugger",
+        "primary_mail_host" : "mail.example.com"
     },
     },
     "dashboard": {
     "dashboard": {
       "is_enabled": true,
       "is_enabled": true,

+ 219 - 120
guerrilla.go

@@ -2,10 +2,11 @@ package guerrilla

 import (
 	"errors"
+	"fmt"
+	"os"
 	"sync"
 	"sync/atomic"

-	evbus "github.com/asaskevich/EventBus"
 	"github.com/flashmob/go-guerrilla/backends"
 	"github.com/flashmob/go-guerrilla/dashboard"
 	"github.com/flashmob/go-guerrilla/log"
@@ -38,29 +39,33 @@ func (e Errors) Error() string {
 type Guerrilla interface {
 	Start() error
 	Shutdown()
-	Subscribe(topic string, fn interface{}) error
-	Publish(topic string, args ...interface{})
-	Unsubscribe(topic string, handler interface{}) error
+	Subscribe(topic Event, fn interface{}) error
+	Publish(topic Event, args ...interface{})
+	Unsubscribe(topic Event, handler interface{}) error
 	SetLogger(log.Logger)
 }

 type guerrilla struct {
 	Config  AppConfig
 	servers map[string]*server
-	backend backends.Backend
 	// guard controls access to g.servers
-	guard   sync.Mutex
-	state   int8
-	bus     *evbus.EventBus
-	mainlog logStore
+	guard sync.Mutex
+	state int8
+	EventHandler
+	logStore
+	backendStore
 }

 type logStore struct {
 	atomic.Value
 }

+type backendStore struct {
+	atomic.Value
+}
+
 // Get loads the log.logger in an atomic operation. Returns a stderr logger if not able to load
-func (ls *logStore) Get() log.Logger {
+func (ls *logStore) mainlog() log.Logger {
 	if v, ok := ls.Load().(log.Logger); ok {
 		return v
 	}
@@ -68,47 +73,65 @@ func (ls *logStore) Get() log.Logger {
 	return l
 }

-// Returns a new instance of Guerrilla with the given config, not yet running.
+// setMainlog stores the log value in an atomic operation
+func (ls *logStore) setMainlog(log log.Logger) {
+	ls.Store(log)
+}
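
The embedded logStore and backendStore types are simply atomic.Value used as a lock-free holder for values that get hot-swapped on config reload; a stand-alone sketch of the same pattern (names hypothetical):

    type holder struct{ atomic.Value }

    func (h *holder) get() string {
        if v, ok := h.Load().(string); ok {
            return v
        }
        return "fallback" // analogous to the stderr logger fallback above
    }

    // writers (e.g. a config-reload handler) call h.Store("new value");
    // readers call h.get() and never block, seeing either the old or the new value.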
+
+// Returns a new instance of Guerrilla with the given config, not yet running; the backend is started.
 func New(ac *AppConfig, b backends.Backend, l log.Logger) (Guerrilla, error) {
 	g := &guerrilla{
 		Config:  *ac, // take a local copy
 		servers: make(map[string]*server, len(ac.Servers)),
-		backend: b,
-		bus:     evbus.New(),
 	}
-	g.mainlog.Store(l)
+	g.backendStore.Store(b)
+	g.setMainlog(l)

 	if ac.LogLevel != "" {
-		g.mainlog.Get().SetLevel(ac.LogLevel)
+		g.mainlog().SetLevel(ac.LogLevel)
 	}

 	g.state = GuerrillaStateNew
 	err := g.makeServers()

+	// start the backend for processing email (without clobbering any makeServers error)
+	if err == nil {
+		err = g.backend().Start()
+	}
+
+	if err != nil {
+		return g, err
+	}
+	g.writePid()
+
 	// subscribe for any events that may come in while running
 	g.subscribeEvents()
+
 	return g, err
 }
 
 // Instantiate servers
 func (g *guerrilla) makeServers() error {
-	g.mainlog.Get().Debug("making servers")
+	g.mainlog().Debug("making servers")
 	var errs Errors
 	for _, sc := range g.Config.Servers {
 		if _, ok := g.servers[sc.ListenInterface]; ok {
 			// server already instantiated
 			continue
 		}
-		server, err := newServer(&sc, g.backend, g.mainlog.Get())
-		if err != nil {
-			g.mainlog.Get().WithError(err).Errorf("Failed to create server [%s]", sc.ListenInterface)
+		if err := sc.Validate(); err != nil {
+			g.mainlog().WithError(err).Errorf("Failed to create server [%s]", sc.ListenInterface)
 			errs = append(errs, err)
+			continue
+		} else {
+			server, err := newServer(&sc, g.backend(), g.mainlog())
+			if err != nil {
+				g.mainlog().WithError(err).Errorf("Failed to create server [%s]", sc.ListenInterface)
+				errs = append(errs, err)
+			}
+			if server != nil {
+				g.servers[sc.ListenInterface] = server
+				server.setAllowedHosts(g.Config.AllowedHosts)
+			}
 		}
-		if server != nil {
-			g.servers[sc.ListenInterface] = server
-			server.setAllowedHosts(g.Config.AllowedHosts)
-		}
-
 	}
 	if len(g.servers) == 0 {
 		errs = append(errs, errors.New("There are no servers that can start, please check your config"))
@@ -119,41 +142,37 @@ func (g *guerrilla) makeServers() error {
 	return errs
 }

-// find a server by interface, retuning the index of the config and instance of server
-func (g *guerrilla) findServer(iface string) (int, *server) {
+// findServer finds a server by iface (interface), returning the server or an error
+func (g *guerrilla) findServer(iface string) (*server, error) {
 	g.guard.Lock()
 	defer g.guard.Unlock()
-	ret := -1
-	for i := range g.Config.Servers {
-		if g.Config.Servers[i].ListenInterface == iface {
-			server := g.servers[iface]
-			ret = i
-			return ret, server
-		}
+	if server, ok := g.servers[iface]; ok {
+		return server, nil
 	}
-	return ret, nil
+	return nil, errors.New("server not found in g.servers")
 }

-func (g *guerrilla) removeServer(serverConfigIndex int, iface string) {
+// removeServer removes a server from the list of servers
+func (g *guerrilla) removeServer(iface string) {
 	g.guard.Lock()
 	defer g.guard.Unlock()
 	delete(g.servers, iface)
-	// cut out from the slice
-	g.Config.Servers = append(g.Config.Servers[:serverConfigIndex], g.Config.Servers[1:]...)
 }

-func (g *guerrilla) addServer(sc *ServerConfig) {
+// setConfig sets the app config
+func (g *guerrilla) setConfig(c *AppConfig) {
 	g.guard.Lock()
 	defer g.guard.Unlock()
-	g.Config.Servers = append(g.Config.Servers, *sc)
-	g.makeServers()
+	g.Config = *c
 }

-func (g *guerrilla) setConfig(i int, sc *ServerConfig) {
+// setServerConfig updates the server's config, which will apply to the next connected client
+func (g *guerrilla) setServerConfig(sc *ServerConfig) {
 	g.guard.Lock()
 	defer g.guard.Unlock()
-	g.Config.Servers[i] = *sc
-	g.servers[sc.ListenInterface].setConfig(sc)
+	if _, ok := g.servers[sc.ListenInterface]; ok {
+		g.servers[sc.ListenInterface].setConfig(sc)
+	}
 }

 // mapServers calls a callback on each server in the g.servers map
@@ -170,132 +189,150 @@ func (g *guerrilla) mapServers(callback func(*server)) map[string]*server {
 // subscribeEvents subscribes event handlers for configuration change events
 func (g *guerrilla) subscribeEvents() {

+	// main config changed
+	g.Subscribe(EventConfigNewConfig, func(c *AppConfig) {
+		g.setConfig(c)
+	})
+
 	// allowed_hosts changed, set for all servers
-	g.Subscribe("config_change:allowed_hosts", func(c *AppConfig) {
+	g.Subscribe(EventConfigAllowedHosts, func(c *AppConfig) {
 		g.mapServers(func(server *server) {
 			server.setAllowedHosts(c.AllowedHosts)
 		})
-		g.mainlog.Get().Infof("allowed_hosts config changed, a new list was set")
+		g.mainlog().Infof("allowed_hosts config changed, a new list was set")
 	})

 	// the main log file changed
-	g.Subscribe("config_change:log_file", func(c *AppConfig) {
+	g.Subscribe(EventConfigLogFile, func(c *AppConfig) {
 		var err error
 		var l log.Logger
 		if l, err = log.GetLogger(c.LogFile); err == nil {
-			g.mainlog.Store(l)
+			g.setMainlog(l)
 			g.mapServers(func(server *server) {
-				server.mainlogStore.Store(l) // it will change to hl on the next accepted client
+				// the server's logger will change when the next client is accepted
+				server.mainlogStore.Store(l)
 			})
-			g.mainlog.Get().Infof("main log for new clients changed to to [%s]", c.LogFile)
+			g.mainlog().Infof("main log for new clients changed to [%s]", c.LogFile)
 		} else {
-			g.mainlog.Get().WithError(err).Errorf("main logging change failed [%s]", c.LogFile)
+			g.mainlog().WithError(err).Errorf("main logging change failed [%s]", c.LogFile)
 		}

 	})

 	// re-open the main log file (file not changed)
-	g.Subscribe("config_change:reopen_log_file", func(c *AppConfig) {
-		g.mainlog.Get().Reopen()
-		g.mainlog.Get().Infof("re-opened main log file [%s]", c.LogFile)
+	g.Subscribe(EventConfigLogReopen, func(c *AppConfig) {
+		g.mainlog().Reopen()
+		g.mainlog().Infof("re-opened main log file [%s]", c.LogFile)
 	})

 	// when the log level changes, apply to the mainlog and server logs
-	g.Subscribe("config_change:log_level", func(c *AppConfig) {
-		g.mainlog.Get().SetLevel(c.LogLevel)
+	g.Subscribe(EventConfigLogLevel, func(c *AppConfig) {
+		g.mainlog().SetLevel(c.LogLevel)
 		g.mapServers(func(server *server) {
 			server.log.SetLevel(c.LogLevel)
 		})
-		g.mainlog.Get().Infof("log level changed to [%s]", c.LogLevel)
+		g.mainlog().Infof("log level changed to [%s]", c.LogLevel)
+	})
+
+	// write out our pid whenever the file name changes in the config
+	g.Subscribe(EventConfigPidFile, func(ac *AppConfig) {
+		g.writePid()
 	})

 	// server config was updated
-	g.Subscribe("server_change:update_config", func(sc *ServerConfig) {
-		if i, _ := g.findServer(sc.ListenInterface); i != -1 {
-			g.setConfig(i, sc)
-		}
+	g.Subscribe(EventConfigServerConfig, func(sc *ServerConfig) {
+		g.setServerConfig(sc)
 	})

 	// add a new server to the config & start it
-	g.Subscribe("server_change:new_server", func(sc *ServerConfig) {
-		if i, _ := g.findServer(sc.ListenInterface); i == -1 {
+	g.Subscribe(EventConfigServerNew, func(sc *ServerConfig) {
+		g.mainlog().Debugf("event fired [%s] %s", EventConfigServerNew, sc.ListenInterface)
+		if _, err := g.findServer(sc.ListenInterface); err != nil {
 			// not found, let's add it
-			g.addServer(sc)
-			g.mainlog.Get().Infof("New server added [%s]", sc.ListenInterface)
+			if err := g.makeServers(); err != nil {
+				g.mainlog().WithError(err).Errorf("cannot add server [%s]", sc.ListenInterface)
+				return
+			}
+			g.mainlog().Infof("New server added [%s]", sc.ListenInterface)
 			if g.state == GuerrillaStateStarted {
 				err := g.Start()
 				if err != nil {
-					g.mainlog.Get().WithError(err).Info("Event server_change:new_server returned errors when starting")
+					g.mainlog().WithError(err).Info("Event server_change:new_server returned errors when starting")
 				}
 			}
+		} else {
+			g.mainlog().Debugf("new event, but server already found")
 		}
 	})
-	// start a server that already exists in the config and has been instantiated
-	g.Subscribe("server_change:start_server", func(sc *ServerConfig) {
-		if i, server := g.findServer(sc.ListenInterface); i != -1 {
+	// start a server that already exists in the config and has been enabled
+	g.Subscribe(EventConfigServerStart, func(sc *ServerConfig) {
+		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			if server.state == ServerStateStopped || server.state == ServerStateNew {
-				g.mainlog.Get().Infof("Starting server [%s]", server.listenInterface)
+				g.mainlog().Infof("Starting server [%s]", server.listenInterface)
 				err := g.Start()
 				if err != nil {
-					g.mainlog.Get().WithError(err).Info("Event server_change:start_server returned errors when starting")
+					g.mainlog().WithError(err).Info("Event server_change:start_server returned errors when starting")
 				}
 			}
 		}
 	})
 	// stop running a server
-	g.Subscribe("server_change:stop_server", func(sc *ServerConfig) {
-		if i, server := g.findServer(sc.ListenInterface); i != -1 {
+	g.Subscribe(EventConfigServerStop, func(sc *ServerConfig) {
+		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			if server.state == ServerStateRunning {
 				server.Shutdown()
-				g.mainlog.Get().Infof("Server [%s] stopped.", sc.ListenInterface)
+				g.mainlog().Infof("Server [%s] stopped.", sc.ListenInterface)
 			}
 		}
 	})
 	// server was removed from config
-	g.Subscribe("server_change:remove_server", func(sc *ServerConfig) {
-		if i, server := g.findServer(sc.ListenInterface); i != -1 {
+	g.Subscribe(EventConfigServerRemove, func(sc *ServerConfig) {
+		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			server.Shutdown()
-			g.removeServer(i, sc.ListenInterface)
-			g.mainlog.Get().Infof("Server [%s] removed from config, stopped it.", sc.ListenInterface)
+			g.removeServer(sc.ListenInterface)
+			g.mainlog().Infof("Server [%s] removed from config, stopped it.", sc.ListenInterface)
 		}
 	})

 	// TLS changes
-	g.Subscribe("server_change:tls_config", func(sc *ServerConfig) {
-		if i, server := g.findServer(sc.ListenInterface); i != -1 {
+	g.Subscribe(EventConfigServerTLSConfig, func(sc *ServerConfig) {
+		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			if err := server.configureSSL(); err == nil {
-				g.mainlog.Get().Infof("Server [%s] new TLS configuration loaded", sc.ListenInterface)
+				g.mainlog().Infof("Server [%s] new TLS configuration loaded", sc.ListenInterface)
 			} else {
-				g.mainlog.Get().WithError(err).Errorf("Server [%s] failed to load the new TLS configuration", sc.ListenInterface)
+				g.mainlog().WithError(err).Errorf("Server [%s] failed to load the new TLS configuration", sc.ListenInterface)
 			}
 		}
 	})
 	// when a server's timeout changes
-	g.Subscribe("server_change:timeout", func(sc *ServerConfig) {
+	g.Subscribe(EventConfigServerTimeout, func(sc *ServerConfig) {
 		g.mapServers(func(server *server) {
 			server.setTimeout(sc.Timeout)
 		})
 	})
 	// when a server's max clients changes
-	g.Subscribe("server_change:max_clients", func(sc *ServerConfig) {
+	g.Subscribe(EventConfigServerMaxClients, func(sc *ServerConfig) {
 		g.mapServers(func(server *server) {
 			// TODO resize the pool somehow
 		})
 	})
 	// when a server's log file changes
-	g.Subscribe("server_change:new_log_file", func(sc *ServerConfig) {
-		if i, server := g.findServer(sc.ListenInterface); i != -1 {
+	g.Subscribe(EventConfigServerLogFile, func(sc *ServerConfig) {
+		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			var err error
 			var l log.Logger
 			if l, err = log.GetLogger(sc.LogFile); err == nil {
-				g.mainlog.Store(l)
-				server.logStore.Store(l) // it will change to l on the next accepted client
-				g.mainlog.Get().Infof("Server [%s] changed, new clients will log to: [%s]",
+				g.setMainlog(l)
+				backends.Svc.SetMainlog(l)
+				// it will change to the new logger on the next accepted client
+				server.logStore.Store(l)
+				g.mainlog().Infof("Server [%s] changed, new clients will log to: [%s]",
 					sc.ListenInterface,
 					sc.LogFile,
 				)
 			} else {
-				g.mainlog.Get().WithError(err).Errorf(
+				g.mainlog().WithError(err).Errorf(
 					"Server [%s] log change failed to: [%s]",
 					sc.ListenInterface,
 					sc.LogFile,
@@ -303,16 +340,62 @@ func (g *guerrilla) subscribeEvents() {
 			}
 		}
 	})
-	// when the daemon caught a sighup
-	g.Subscribe("server_change:reopen_log_file", func(sc *ServerConfig) {
-		if i, server := g.findServer(sc.ListenInterface); i != -1 {
+	// when the daemon caught a sighup; event for an individual server
+	g.Subscribe(EventConfigServerLogReopen, func(sc *ServerConfig) {
+		if server, err := g.findServer(sc.ListenInterface); err == nil {
 			server.log.Reopen()
-			g.mainlog.Get().Infof("Server [%s] re-opened log file [%s]", sc.ListenInterface, sc.LogFile)
+			g.mainlog().Infof("Server [%s] re-opened log file [%s]", sc.ListenInterface, sc.LogFile)
+		}
+	})
+	// when the backend changes
+	g.Subscribe(EventConfigBackendConfig, func(appConfig *AppConfig) {
+		logger, _ := log.GetLogger(appConfig.LogFile)
+		// shut down the backend first.
+		var err error
+		if err = g.backend().Shutdown(); err != nil {
+			logger.WithError(err).Warn("Backend failed to shutdown")
+			return
+		}
+		// init a new backend; revert to the old backend config if it fails
+		if newBackend, newErr := backends.New(appConfig.BackendConfig, logger); newErr != nil {
+			logger.WithError(newErr).Error("Error while loading the backend")
+			err = g.backend().Reinitialize()
+			if err != nil {
+				logger.WithError(err).Fatal("failed to revert to old backend config")
+				return
+			}
+			err = g.backend().Start()
+			if err != nil {
+				logger.WithError(err).Fatal("failed to start backend with old config")
+				return
+			}
+			logger.Info("reverted to old backend config")
+		} else {
+			// swap to the new backend (assuming the old backend was shut down so it can be safely swapped)
+			if err := newBackend.Start(); err != nil {
+				logger.WithError(err).Error("backend could not start")
+			} else {
+				logger.Info("new backend started")
+			}
+			g.storeBackend(newBackend)
 		}
 	})

 }

+func (g *guerrilla) storeBackend(b backends.Backend) {
+	g.backendStore.Store(b)
+	g.mapServers(func(server *server) {
+		server.setBackend(b)
+	})
+}
+
+func (g *guerrilla) backend() backends.Backend {
+	if b, ok := g.backendStore.Load().(backends.Backend); ok {
+		return b
+	}
+	return nil
+}
+
 // Entry point for the application. Starts all servers.
 func (g *guerrilla) Start() error {
 	var startErrors Errors
@@ -324,6 +407,11 @@ func (g *guerrilla) Start() error {
 	if len(g.servers) == 0 {
 		return append(startErrors, errors.New("No servers to start, please check the config"))
 	}
+	if g.state == GuerrillaStateStopped {
+		// when a backend is shut down, we need to re-initialize it before it can be started again
+		g.backend().Reinitialize()
+		g.backend().Start()
+	}
 	// channel for reading errors
 	errs := make(chan error, len(g.servers))
 	var startWG sync.WaitGroup
@@ -340,6 +428,7 @@ func (g *guerrilla) Start() error {
 		}
 		startWG.Add(1)
 		go func(s *server) {
+			g.mainlog().Infof("Starting: %s", s.listenInterface)
 			if err := s.Start(&startWG); err != nil {
 				errs <- err
 			}
@@ -361,47 +450,57 @@ func (g *guerrilla) Start() error {
 	}
 	if len(startErrors) > 0 {
 		return startErrors
-	} else {
-		if gw, ok := g.backend.(*backends.BackendGateway); ok {
-			if gw.State == backends.BackendStateShuttered {
-				_ = gw.Reinitialize()
-			}
-		}
 	}
 	return nil
 }

 func (g *guerrilla) Shutdown() {
+
+	// shut down the servers first
+	g.mapServers(func(s *server) {
+		if s.state == ServerStateRunning {
+			s.Shutdown()
+			g.mainlog().Infof("shutdown completed for [%s]", s.listenInterface)
+		}
+	})
+
 	g.guard.Lock()
 	defer func() {
 		g.state = GuerrillaStateStopped
 		defer g.guard.Unlock()
 	}()
-	for ListenInterface, s := range g.servers {
-		if s.state == ServerStateRunning {
-			s.Shutdown()
-			g.mainlog.Get().Infof("shutdown completed for [%s]", ListenInterface)
-		}
-	}
-	if err := g.backend.Shutdown(); err != nil {
-		g.mainlog.Get().WithError(err).Warn("Backend failed to shutdown")
+	if err := g.backend().Shutdown(); err != nil {
+		g.mainlog().WithError(err).Warn("Backend failed to shutdown")
 	} else {
-		g.mainlog.Get().Infof("Backend shutdown completed")
+		g.mainlog().Infof("Backend shutdown completed")
 	}
 }

-func (g *guerrilla) Subscribe(topic string, fn interface{}) error {
-	return g.bus.Subscribe(topic, fn)
-}
-
-func (g *guerrilla) Publish(topic string, args ...interface{}) {
-	g.bus.Publish(topic, args...)
-}
-
-func (g *guerrilla) Unsubscribe(topic string, handler interface{}) error {
-	return g.bus.Unsubscribe(topic, handler)
+// SetLogger sets the logger for the app and propagates it to sub-packages (e.g. backends)
+func (g *guerrilla) SetLogger(l log.Logger) {
+	l.SetLevel(g.Config.LogLevel)
+	g.setMainlog(l)
+	backends.Svc.SetMainlog(l)
 }

-func (g *guerrilla) SetLogger(l log.Logger) {
-	g.mainlog.Store(l)
+// writePid writes the pid (process id) to the file specified in the config.
+// It writes nothing if no file is specified.
+func (g *guerrilla) writePid() error {
+	if len(g.Config.PidFile) > 0 {
+		if f, err := os.Create(g.Config.PidFile); err == nil {
+			defer f.Close()
+			pid := os.Getpid()
+			if _, err := f.WriteString(fmt.Sprintf("%d", pid)); err == nil {
+				f.Sync()
+				g.mainlog().Infof("pid_file (%s) written with pid:%v", g.Config.PidFile, pid)
+			} else {
+				g.mainlog().WithError(err).Errorf("Error while writing pidFile (%s)", g.Config.PidFile)
+				return err
+			}
+		} else {
+			g.mainlog().WithError(err).Errorf("Error while creating pidFile (%s)", g.Config.PidFile)
+			return err
+		}
+	}
+	return nil
 }

+ 1 - 0
log/log.go

@@ -281,6 +281,7 @@ func (hook *LogrusHook) Fire(entry *log.Entry) error {
 		}()
 		// use the plain text hook
 		entry.Logger.Formatter = hook.plainTxtFormatter
+		// todo: `go test -v -race` detected a race condition here; try log.SetFormatter()
 	}
 	if line, err := entry.String(); err == nil {
 		r := strings.NewReader(line)

+ 331 - 0
mail/envelope.go

@@ -0,0 +1,331 @@
+package mail
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"gopkg.in/iconv.v1"
+	"io"
+	"io/ioutil"
+	"mime/quotedprintable"
+	"net/textproto"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+)
+
+const maxHeaderChunk = 3 << 10 // 3KB: the maximum amount of data scanned for the header
+
+// Address encodes an email address of the form `<user@host>`
+type Address struct {
+	User string
+	Host string
+}
+
+func (ep *Address) String() string {
+	return fmt.Sprintf("%s@%s", ep.User, ep.Host)
+}
+
+func (ep *Address) IsEmpty() bool {
+	return ep.User == "" && ep.Host == ""
+}
+
+// Envelope represents a single SMTP message.
+type Envelope struct {
+	// Remote IP address
+	RemoteIP string
+	// Message sent in EHLO command
+	Helo string
+	// Sender
+	MailFrom Address
+	// Recipients
+	RcptTo []Address
+	// Data stores the header and message body
+	Data bytes.Buffer
+	// Subject stores the subject of the email, extracted and decoded after calling ParseHeaders()
+	Subject string
+	// TLS is true if the email was received using a TLS connection
+	TLS bool
+	// Header stores the results from ParseHeaders()
+	Header textproto.MIMEHeader
+	// Values hold the values generated when processing the envelope by the backend
+	Values map[string]interface{}
+	// Hashes of each email on the rcpt
+	Hashes []string
+	// additional delivery header that may be added
+	DeliveryHeader string
+	// Email(s) will be queued with this id
+	QueuedId string
+	// When locked, it means that the envelope is being processed by the backend
+	sync.Mutex
+}
+
+func NewEnvelope(remoteAddr string, clientID uint64) *Envelope {
+	return &Envelope{
+		RemoteIP: remoteAddr,
+		Values:   make(map[string]interface{}),
+		QueuedId: queuedID(clientID),
+	}
+}
+
+func queuedID(clientID uint64) string {
+	// note: format the integers with %d; converting with string(int) would yield runes, not digits
+	return fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%d", time.Now().Unix(), clientID))))
+}
+
+// ParseHeaders parses the headers into the Header field of the Envelope struct.
+// The Data buffer must be full before calling.
+// It assumes that at most maxHeaderChunk (3KB) of email data can be the header.
+// Decoding to UTF-8 is only done on the Subject, and the result is assigned to the Subject field.
+func (e *Envelope) ParseHeaders() error {
+	var err error
+	if e.Header != nil {
+		return errors.New("Headers already parsed")
+	}
+	buf := bytes.NewBuffer(e.Data.Bytes())
+	// find where the header ends, assuming anything over maxHeaderChunk (3KB) is not part of the header
+	max := maxHeaderChunk
+	if buf.Len() < max {
+		max = buf.Len()
+	}
+	// read in the chunk which we'll scan for the header
+	chunk := make([]byte, max)
+	buf.Read(chunk)
+	headerEnd := strings.Index(string(chunk), "\n\n") // the first blank line (two consecutive newlines) marks the end of the header
+	if headerEnd > -1 {
+		header := chunk[0:headerEnd]
+		headerReader := textproto.NewReader(bufio.NewReader(bytes.NewBuffer(header)))
+		e.Header, err = headerReader.ReadMIMEHeader()
+		if err != nil {
+			// decode the subject
+			if subject, ok := e.Header["Subject"]; ok {
+				e.Subject = MimeHeaderDecode(subject[0])
+			}
+		}
+	} else {
+		err = errors.New("header not found")
+	}
+	return err
+}
+
+// Len returns the number of bytes that would be in the reader returned by NewReader()
+func (e *Envelope) Len() int {
+	return len(e.DeliveryHeader) + e.Data.Len()
+}
+
+// Returns a new reader for reading the email contents, including the delivery headers
+func (e *Envelope) NewReader() io.Reader {
+	return io.MultiReader(
+		strings.NewReader(e.DeliveryHeader),
+		bytes.NewReader(e.Data.Bytes()),
+	)
+}
+
+// String converts the email to string.
+// Typically, you would want to use the compressor guerrilla.Processor for more efficiency, or use NewReader
+func (e *Envelope) String() string {
+	return e.DeliveryHeader + e.Data.String()
+}
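
A brief sketch of why NewReader is preferable to String for large messages: it streams the delivery header followed by the body without allocating one large string (the output file is hypothetical):

    f, _ := os.Create("/tmp/queued.eml")
    defer f.Close()
    // streams DeliveryHeader followed by Data
    if _, err := io.Copy(f, e.NewReader()); err != nil {
        // handle the write error
    }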
+
+// ResetTransaction is called when the transaction is reset (keeping the connection open)
+func (e *Envelope) ResetTransaction() {
+	e.MailFrom = Address{}
+	e.RcptTo = []Address{}
+	// reset the data buffer, keep it allocated
+	e.Data.Reset()
+}
+
+// Reseed prepares the envelope for reuse with a new connection, once it's accepted
+func (e *Envelope) Reseed(RemoteIP string, clientID uint64) {
+	e.Subject = ""
+	e.RemoteIP = RemoteIP
+	e.Helo = ""
+	e.Header = nil
+	e.TLS = false
+	e.Hashes = make([]string, 0)
+	e.DeliveryHeader = ""
+	e.Values = make(map[string]interface{})
+	e.QueuedId = queuedID(clientID)
+}
+
+// PushRcpt adds a recipient email address to the envelope
+func (e *Envelope) PushRcpt(addr Address) {
+	e.RcptTo = append(e.RcptTo, addr)
+}
+
+// Pop removes the last email address that was pushed to the envelope
+func (e *Envelope) PopRcpt() Address {
+	ret := e.RcptTo[len(e.RcptTo)-1]
+	e.RcptTo = e.RcptTo[:len(e.RcptTo)-1]
+	return ret
+}
+
+var mimeRegex, _ = regexp.Compile(`=\?(.+?)\?([QBqp])\?(.+?)\?=`)
+
+// Decode strings in Mime header format
+// eg. =?ISO-2022-JP?B?GyRCIVo9dztSOWJAOCVBJWMbKEI=?=
+// This function uses GNU iconv under the hood, for more charset support than in Go's library
+func MimeHeaderDecode(str string) string {
+
+	matched := mimeRegex.FindAllStringSubmatch(str, -1)
+	var charset, encoding, payload string
+	if matched != nil {
+		for i := 0; i < len(matched); i++ {
+			if len(matched[i]) > 2 {
+				charset = matched[i][1]
+				encoding = strings.ToUpper(matched[i][2])
+				payload = matched[i][3]
+				switch encoding {
+				case "B":
+					str = strings.Replace(
+						str,
+						matched[i][0],
+						MailTransportDecode(payload, "base64", charset),
+						1)
+				case "Q":
+					str = strings.Replace(
+						str,
+						matched[i][0],
+						MailTransportDecode(payload, "quoted-printable", charset),
+						1)
+				}
+			}
+		}
+	}
+	return str
+}
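
A quick illustration of the decoder (the expected output assumes a working local iconv build with ISO-8859-1 support):

    s := mail.MimeHeaderDecode("=?ISO-8859-1?Q?Caf=E9?= menu")
    fmt.Println(s) // expected: "Café menu"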
+
+// decode from 7bit to 8bit UTF-8
+// encodingType can be "base64" or "quoted-printable"
+func MailTransportDecode(str string, encodingType string, charset string) string {
+	if charset == "" {
+		charset = "UTF-8"
+	} else {
+		charset = strings.ToUpper(charset)
+	}
+	if encodingType == "base64" {
+		str = fromBase64(str)
+	} else if encodingType == "quoted-printable" {
+		str = fromQuotedP(str)
+	}
+
+	if charset != "UTF-8" {
+		charset = fixCharset(charset)
+		// iconv is pretty good at what it does
+		if cd, err := iconv.Open("UTF-8", charset); err == nil {
+			defer func() {
+				cd.Close()
+				if r := recover(); r != nil {
+					//logln(1, fmt.Sprintf("Recovered in %v", r))
+				}
+			}()
+			// eg. charset can be "ISO-2022-JP"
+			return cd.ConvString(str)
+		}
+
+	}
+	return str
+}
+
+func fromBase64(data string) string {
+	buf := bytes.NewBufferString(data)
+	decoder := base64.NewDecoder(base64.StdEncoding, buf)
+	res, _ := ioutil.ReadAll(decoder)
+	return string(res)
+}
+
+func fromQuotedP(data string) string {
+	res, _ := ioutil.ReadAll(quotedprintable.NewReader(strings.NewReader(data)))
+	return string(res)
+}
+
+var charsetRegex, _ = regexp.Compile(`[_:.\/\\]`)
+
+func fixCharset(charset string) string {
+	fixed_charset := charsetRegex.ReplaceAllString(charset, "-")
+	// Fix charset
+	// borrowed from http://squirrelmail.svn.sourceforge.net/viewvc/squirrelmail/trunk/squirrelmail/include/languages.php?revision=13765&view=markup
+	// OE ks_c_5601_1987 > cp949
+	fixed_charset = strings.Replace(fixed_charset, "ks-c-5601-1987", "cp949", -1)
+	// Moz x-euc-tw > euc-tw
+	fixed_charset = strings.Replace(fixed_charset, "x-euc", "euc", -1)
+	// Moz x-windows-949 > cp949
+	fixed_charset = strings.Replace(fixed_charset, "x-windows_", "cp", -1)
+	// windows-125x and cp125x charsets
+	fixed_charset = strings.Replace(fixed_charset, "windows-", "cp", -1)
+	// ibm > cp
+	fixed_charset = strings.Replace(fixed_charset, "ibm", "cp", -1)
+	// iso-8859-8-i -> iso-8859-8
+	fixed_charset = strings.Replace(fixed_charset, "iso-8859-8-i", "iso-8859-8", -1)
+	if charset != fixed_charset {
+		return fixed_charset
+	}
+	return charset
+}
+
+// Envelopes have their own pool
+
+type Pool struct {
+	// envelopes that are ready to be borrowed
+	pool chan *Envelope
+	// semaphore to limit the maximum number of borrowed envelopes
+	sem chan bool
+}
+
+func NewPool(poolSize int) *Pool {
+	return &Pool{
+		pool: make(chan *Envelope, poolSize),
+		sem:  make(chan bool, poolSize),
+	}
+}
+
+func (p *Pool) Borrow(remoteAddr string, clientID uint64) *Envelope {
+	var e *Envelope
+	p.sem <- true // block until there is room for another envelope
+	select {
+	case e = <-p.pool:
+		e.Reseed(remoteAddr, clientID)
+	default:
+		e = NewEnvelope(remoteAddr, clientID)
+	}
+	return e
+}
+
+// Return returns an envelope to the envelope pool.
+// Note that an envelope will not be recycled while it is still
+// being processed
+func (p *Pool) Return(e *Envelope) {
+	// we don't want to recycle an envelope that may still be processing
+	isUnlocked := func() <-chan bool {
+		signal := make(chan bool)
+		// make sure envelope finished processing
+		go func() {
+			// lock will block if still processing
+			e.Lock()
+			// got the lock, it means processing finished
+			e.Unlock()
+			// generate a signal
+			signal <- true
+		}()
+		return signal
+	}()
+
+	select {
+	case <-time.After(time.Second * 30):
+		// envelope still processing, we can't recycle it.
+	case <-isUnlocked:
+		// The envelope was _unlocked_, it finished processing
+		// put back in the pool or destroy
+		select {
+		case p.pool <- e:
+			//placed envelope back in pool
+		default:
+			// pool is full, don't return
+		}
+	}
+	// take a value off the semaphore to make room for more envelopes
+	<-p.sem
+}
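
The pool above combines a buffered channel of recycled envelopes with a semaphore channel that caps how many envelopes can be out at once. A minimal usage sketch (`mail.NewPool`, `Borrow` and `Return` are from the code above; the pool size, remote address, client ID and message below are made up for illustration):

```go
package main

import "github.com/flashmob/go-guerrilla/mail"

func main() {
	pool := mail.NewPool(100) // at most 100 envelopes borrowed at once

	// Borrow blocks once all 100 are out, backpressuring the caller;
	// otherwise it reuses a recycled envelope or allocates a new one.
	e := pool.Borrow("127.0.0.1", 22)
	e.Data.WriteString("Subject: Test\n\nHello.")

	// ... hand e to a backend for processing ...

	// Return waits (up to 30 seconds) for whoever holds e's lock,
	// then recycles the envelope, or discards it if the pool is full.
	pool.Return(e)
}
```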

+ 50 - 0
mail/envelope_test.go

@@ -0,0 +1,50 @@
+package mail
+
+import (
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+func TestMimeHeaderDecode(t *testing.T) {
+	str := MimeHeaderDecode("=?ISO-2022-JP?B?GyRCIVo9dztSOWJAOCVBJWMbKEI=?=")
+	if i := strings.Index(str, "【女子高生チャ"); i != 0 {
+		t.Error("expecting 【女子高生チャ, got:", str)
+	}
+	str = MimeHeaderDecode("=?ISO-8859-1?Q?Andr=E9?= Pirard <[email protected]>")
+	if strings.Index(str, "André Pirard") != 0 {
+		t.Error("expecting André Pirard, got:", str)
+	}
+}
+
+func TestEnvelope(t *testing.T) {
+	e := NewEnvelope("127.0.0.1", 22)
+
+	e.QueuedId = "abc123"
+	e.Helo = "helo.example.com"
+	e.MailFrom = Address{User: "test", Host: "example.com"}
+	e.TLS = true
+	e.RemoteIP = "222.111.233.121"
+	to := Address{User: "test", Host: "example.com"}
+	e.PushRcpt(to)
+	if to.String() != "[email protected]" {
+		t.Error("to does not equal [email protected], it was:", to.String())
+	}
+	e.Data.WriteString("Subject: Test\n\nThis is a test nbnb nbnb hgghgh nnnbnb nbnbnb nbnbn.")
+
+	addHead := "Delivered-To: " + to.String() + "\n"
+	addHead += "Received: from " + e.Helo + " (" + e.Helo + "  [" + e.RemoteIP + "])\n"
+	e.DeliveryHeader = addHead
+
+	r := e.NewReader()
+
+	data, _ := ioutil.ReadAll(r)
+	if len(data) != e.Len() {
+		t.Error("e.Len() is inccorrect, it shown ", e.Len(), " but we wanted ", len(data))
+	}
+	e.ParseHeaders()
+	if e.Subject != "Test" {
+		t.Error("Subject expecting: Test, got:", e.Subject)
+	}
+
+}

+ 5 - 4
pool.go

@@ -3,6 +3,7 @@ package guerrilla
 import (
 	"errors"
 	"github.com/flashmob/go-guerrilla/log"
+	"github.com/flashmob/go-guerrilla/mail"
 	"net"
 	"sync"
 	"sync/atomic"
@@ -18,7 +19,7 @@ type Poolable interface {
 	// ability to set read/write timeout
 	setTimeout(t time.Duration)
 	// set a new connection and client id
-	init(c net.Conn, clientID uint64)
+	init(c net.Conn, clientID uint64, ep *mail.Pool)
 	// get a unique id
 	getID() uint64
 }
@@ -121,7 +122,7 @@ func (p *Pool) GetActiveClientsCount() int {
 }
 
 // Borrow a Client from the pool. Will block if len(activeClients) > maxClients
-func (p *Pool) Borrow(conn net.Conn, clientID uint64, logger log.Logger) (Poolable, error) {
+func (p *Pool) Borrow(conn net.Conn, clientID uint64, logger log.Logger, ep *mail.Pool) (Poolable, error) {
 	p.poolGuard.Lock()
 	defer p.poolGuard.Unlock()
 
@@ -134,9 +135,9 @@ func (p *Pool) Borrow(conn net.Conn, clientID uint64, logger log.Logger) (Poolab
 	case p.sem <- true: // block the client from serving until there is room
 		select {
 		case c = <-p.pool:
-			c.init(conn, clientID)
+			c.init(conn, clientID, ep)
 		default:
-			c = NewClient(conn, clientID, logger)
+			c = NewClient(conn, clientID, logger, ep)
 		}
 		p.activeClientsAdd(c)
 

+ 8 - 1
response/enhanced.go

@@ -134,6 +134,7 @@ type Responses struct {
 	FailBackendNotRunning        string
 	FailBackendTransaction       string
 	FailBackendTimeout           string
+	FailRcptCmd                  string
 
 	// The 400's
 	ErrorTooManyRecipients string
@@ -155,7 +156,6 @@ type Responses struct {
 // Called automatically during package load to build up the Responses struct
 func init() {
 
-	// There's even a Wikipedia page for canned responses: https://en.wikipedia.org/wiki/Canned_response
 	Canned = Responses{}
 
 	Canned.FailLineTooLong = (&Response{
@@ -337,6 +337,13 @@ func init() {
 		Comment:      "Error: transaction timeout",
 	}).String()
 
+	Canned.FailRcptCmd = (&Response{
+		EnhancedCode: BadDestinationMailboxAddress,
+		BasicCode:    550,
+		Class:        ClassPermanentFailure,
+		Comment:      "User unknown in local recipient table",
+	}).String()
+
 }
 
 // DefaultMap contains defined default codes (RfC 3463)
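
The new `FailRcptCmd` pairs basic code 550 with the `BadDestinationMailboxAddress` enhanced code. A small sketch of what it should render as, assuming `Response.String()` (not shown in this diff) formats responses as basic code, enhanced code, then comment:

```go
// response.Canned.FailRcptCmd is from the diff above; the expected output
// assumes the "<basic code> <enhanced code> <comment>" rendering.
package main

import (
	"fmt"

	"github.com/flashmob/go-guerrilla/response"
)

func main() {
	// Per RFC 3463, BadDestinationMailboxAddress is X.1.1 and the
	// permanent-failure class is 5, so this should print something like:
	// 550 5.1.1 User unknown in local recipient table
	fmt.Println(response.Canned.FailRcptCmd)
}
```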

+ 64 - 40
server.go

@@ -6,15 +6,14 @@ import (
 	"fmt"
 	"io"
 	"net"
-	"runtime"
 	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
 
 	"github.com/flashmob/go-guerrilla/backends"
-	"github.com/flashmob/go-guerrilla/envelope"
 	"github.com/flashmob/go-guerrilla/log"
+	"github.com/flashmob/go-guerrilla/mail"
 	"github.com/flashmob/go-guerrilla/response"
 )
 
@@ -47,7 +46,6 @@ const (
 // Server listens for SMTP clients on the port specified in its config
 type server struct {
 	configStore     atomic.Value // stores guerrilla.ServerConfig
-	backend         backends.Backend
 	tlsConfigStore  atomic.Value
 	timeout         atomic.Value // stores time.Duration
 	listenInterface string
@@ -62,23 +60,26 @@ type server struct {
 	// If log changed after a config reload, newLogStore stores the value here until it's safe to change it
 	logStore     atomic.Value
 	mainlogStore atomic.Value
+	backendStore atomic.Value
+	envelopePool *mail.Pool
 }
 
 type allowedHosts struct {
-	table map[string]bool // host lookup table
-	m     sync.Mutex      // guard access to the map
+	table      map[string]bool // host lookup table
+	sync.Mutex                 // guard access to the map
 }
 
 // Creates and returns a new ready-to-run Server from a configuration
 func newServer(sc *ServerConfig, b backends.Backend, l log.Logger) (*server, error) {
 	server := &server{
-		backend:         b,
 		clientPool:      NewPool(sc.MaxClients),
 		closedListener:  make(chan (bool), 1),
 		listenInterface: sc.ListenInterface,
 		state:           ServerStateNew,
 		mainlog:         l,
+		envelopePool:    mail.NewPool(sc.MaxClients),
 	}
+	server.backendStore.Store(b)
 	var logOpenError error
 	if sc.LogFile == "" {
 		// none set, use the same log file as mainlog
@@ -136,6 +137,19 @@ func (s *server) configureLog() {
 	}
 }
 
+// setBackend sets the backend to use for processing email envelopes
+func (s *server) setBackend(b backends.Backend) {
+	s.backendStore.Store(b)
+}
+
+// backend gets the backend used to process email envelopes
+func (s *server) backend() backends.Backend {
+	if b, ok := s.backendStore.Load().(backends.Backend); ok {
+		return b
+	}
+	return nil
+}
+
 // Set the timeout for the server and all clients
 func (server *server) setTimeout(seconds int) {
 	duration := time.Duration(int64(seconds))
@@ -156,8 +170,8 @@ func (server *server) isEnabled() bool {
 
 // Set the allowed hosts for the server
 func (server *server) setAllowedHosts(allowedHosts []string) {
-	defer server.hosts.m.Unlock()
-	server.hosts.m.Lock()
+	server.hosts.Lock()
+	defer server.hosts.Unlock()
 	server.hosts.table = make(map[string]bool, len(allowedHosts))
 	for _, h := range allowedHosts {
 		server.hosts.table[strings.ToLower(h)] = true
@@ -204,6 +218,7 @@ func (server *server) Start(startWG *sync.WaitGroup) error {
 			c := p.(*client)
 			if borrow_err == nil {
 				server.handleClient(c)
+				server.envelopePool.Return(c.Envelope)
 				server.clientPool.Return(c)
 			} else {
 				server.log.WithError(borrow_err).Info("couldn't borrow a new client")
@@ -213,7 +228,7 @@ func (server *server) Start(startWG *sync.WaitGroup) error {
 			}
 			// intentionally placed Borrow in args so that it's called in the
 			// same main goroutine.
-		}(server.clientPool.Borrow(conn, clientID, server.log))
+		}(server.clientPool.Borrow(conn, clientID, server.log, server.envelopePool))
 
 	}
 }
@@ -239,8 +254,8 @@ func (server *server) GetActiveClientsCount() int {
 
 // Verifies that the host is a valid recipient.
 func (server *server) allowsHost(host string) bool {
-	defer server.hosts.m.Unlock()
-	server.hosts.m.Lock()
+	server.hosts.Lock()
+	defer server.hosts.Unlock()
 	if _, ok := server.hosts.table[strings.ToLower(host)]; ok {
 		return true
 	}
@@ -297,9 +312,9 @@ func (server *server) handleClient(client *client) {
 	}).Info("Handle client")
 
 	// Initial greeting
-	greeting := fmt.Sprintf("220 %s SMTP Guerrilla(%s) #%d (%d) %s gr:%d",
+	greeting := fmt.Sprintf("220 %s SMTP Guerrilla(%s) #%d (%d) %s",
 		sc.Hostname, Version, client.ID,
-		server.clientPool.GetActiveClientsCount(), time.Now().Format(time.RFC3339), runtime.NumGoroutine())
+		server.clientPool.GetActiveClientsCount(), time.Now().Format(time.RFC3339))
 
 	helo := fmt.Sprintf("250 %s Hello", sc.Hostname)
 	// ehlo is a multi-line reply and need additional \r\n at the end
@@ -321,7 +336,7 @@ func (server *server) handleClient(client *client) {
 		} else if err := client.upgradeToTLS(tlsConfig); err == nil {
 			advertiseTLS = ""
 		} else {
-			server.log.WithError(err).Warnf("[%s] Failed TLS handshake", client.RemoteAddress)
+			server.log.WithError(err).Warnf("[%s] Failed TLS handshake", client.RemoteIP)
 			// server requires TLS, but can't handshake
 			client.kill()
 		}
@@ -341,17 +356,17 @@ func (server *server) handleClient(client *client) {
 			input, err := server.readCommand(client, sc.MaxSize)
 			server.log.Debugf("Client sent: %s", input)
 			if err == io.EOF {
-				server.log.WithError(err).Warnf("Client closed the connection: %s", client.RemoteAddress)
+				server.log.WithError(err).Warnf("Client closed the connection: %s", client.RemoteIP)
 				return
 			} else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
-				server.log.WithError(err).Warnf("Timeout: %s", client.RemoteAddress)
+				server.log.WithError(err).Warnf("Timeout: %s", client.RemoteIP)
 				return
 			} else if err == LineLimitExceeded {
 				client.sendResponse(response.Canned.FailLineTooLong)
 				client.kill()
 				break
 			} else if err != nil {
-				server.log.WithError(err).Warnf("Read error: %s", client.RemoteAddress)
+				server.log.WithError(err).Warnf("Read error: %s", client.RemoteIP)
 				client.kill()
 				break
 			}
@@ -391,28 +406,29 @@ func (server *server) handleClient(client *client) {
 					client.sendResponse(response.Canned.FailNestedMailCmd)
 					break
 				}
-				mail := input[10:]
-				from := envelope.EmailAddress{}
-
-				if !(strings.Index(mail, "<>") == 0) &&
-					!(strings.Index(mail, " <>") == 0) {
+				addr := input[10:]
+				if !(strings.Index(addr, "<>") == 0) &&
+					!(strings.Index(addr, " <>") == 0) {
 					// Not Bounce, extract mail.
-					from, err = extractEmail(mail)
-				}
+					if from, err := extractEmail(addr); err != nil {
+						client.sendResponse(err)
+						break
+					} else {
+						client.MailFrom = from
+						server.log.WithFields(map[string]interface{}{
+							"event":   "mailfrom",
+							"helo":    client.Helo,
+							"domain":  from.Host,
+							"address": getRemoteAddr(client.conn),
+							"id":      client.ID,
+						}).Info("Mail from")
+					}
 
-				if err != nil {
-					client.sendResponse(err)
 				} else {
-					server.log.WithFields(map[string]interface{}{
-						"event":   "mailfrom",
-						"helo":    client.Helo,
-						"domain":  from.Host,
-						"address": client.RemoteAddress,
-						"id":      client.ID,
-					}).Info("Mail from")
-					client.MailFrom = from
-					client.sendResponse(response.Canned.SuccessMailCmd)
+					// bounce has empty from address
+					client.MailFrom = mail.Address{}
 				}
+				client.sendResponse(response.Canned.SuccessMailCmd)
 
 			case strings.Index(cmd, "RCPT TO:") == 0:
 				if len(client.RcptTo) > RFC2821LimitRecipients {
@@ -426,8 +442,15 @@ func (server *server) handleClient(client *client) {
 					if !server.allowsHost(to.Host) {
 						client.sendResponse(response.Canned.ErrorRelayDenied, to.Host)
 					} else {
-						client.RcptTo = append(client.RcptTo, to)
-						client.sendResponse(response.Canned.SuccessRcptCmd)
+						client.PushRcpt(to)
+						rcptError := server.backend().ValidateRcpt(client.Envelope)
+						if rcptError != nil {
+							client.PopRcpt()
+							client.sendResponse(response.Canned.FailRcptCmd + " " + rcptError.Error())
+						} else {
+							client.sendResponse(response.Canned.SuccessRcptCmd)
+						}
+
 					}
 				}
 
@@ -493,15 +516,16 @@ func (server *server) handleClient(client *client) {
 					client.kill()
 				}
 				server.log.WithError(err).Warn("Error reading data")
+				client.resetTransaction()
 				break
 			}
 
-			res := server.backend.Process(client.Envelope)
+			res := server.backend().Process(client.Envelope)
 			if res.Code() < 300 {
 				client.messagesSent++
 				server.log.WithFields(map[string]interface{}{
 					"helo":          client.Helo,
-					"remoteAddress": client.RemoteAddress,
+					"remoteAddress": getRemoteAddr(client.conn),
 					"success":       true,
 				}).Info("Received message")
 			}
@@ -521,7 +545,7 @@ func (server *server) handleClient(client *client) {
 					advertiseTLS = ""
 					client.resetTransaction()
 				} else {
-					server.log.WithError(err).Warnf("[%s] Failed TLS handshake", client.RemoteAddress)
+					server.log.WithError(err).Warnf("[%s] Failed TLS handshake", client.RemoteIP)
 					// Don't disconnect, let the client decide if it wants to continue
 				}
 			}
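
With this change the `RCPT TO:` handler pushes the candidate recipient, asks the backend to validate it, and pops it again on failure, replying with `FailRcptCmd` plus the validator's error text. A sketch of what a validating backend might look like; only `ValidateRcpt` and `Process` appear in this diff, so the struct and the rejection rule below are hypothetical:

```go
package main

import (
	"errors"

	"github.com/flashmob/go-guerrilla/mail"
)

// myBackend is hypothetical and deliberately partial: only the ValidateRcpt
// hook used by the RCPT TO handler is shown; a real Backend implements more
// methods (Process, at least, per the other hunks in this diff).
type myBackend struct{}

func (b *myBackend) ValidateRcpt(e *mail.Envelope) error {
	// The handler pushes the candidate recipient before validating,
	// so the address under test is the last one in RcptTo.
	rcpt := e.RcptTo[len(e.RcptTo)-1]
	if rcpt.User == "noone" { // illustrative rejection rule
		// The error text is appended after response.Canned.FailRcptCmd,
		// e.g. "550 5.1.1 User unknown in local recipient table unknown user"
		return errors.New("unknown user")
	}
	return nil
}
```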

+ 8 - 6
server_test.go

@@ -4,13 +4,13 @@ import (
 	"testing"
 
 	"bufio"
-	"fmt"
 	"net/textproto"
 	"strings"
 	"sync"
 
 	"github.com/flashmob/go-guerrilla/backends"
 	"github.com/flashmob/go-guerrilla/log"
+	"github.com/flashmob/go-guerrilla/mail"
 	"github.com/flashmob/go-guerrilla/mocks"
 )
 
@@ -42,7 +42,9 @@ func getMockServerConn(sc *ServerConfig, t *testing.T) (*mocks.Conn, *server) {
 	if logOpenError != nil {
 		mainlog.WithError(logOpenError).Errorf("Failed creating a logger for mock conn [%s]", sc.ListenInterface)
 	}
-	backend, err := backends.New("dummy", backends.BackendConfig{"log_received_mails": true}, mainlog)
+	backend, err := backends.New(
+		backends.BackendConfig{"log_received_mails": true, "save_workers_size": 1},
+		mainlog)
 	if err != nil {
 		t.Error("new dummy backend failed because:", err)
 	}
@@ -66,7 +68,7 @@ func TestHandleClient(t *testing.T) {
 	}
 	conn, server := getMockServerConn(sc, t)
 	// call the serve.handleClient() func in a goroutine.
-	client := NewClient(conn.Server, 1, mainlog)
+	client := NewClient(conn.Server, 1, mainlog, mail.NewPool(5))
 	var wg sync.WaitGroup
 	wg.Add(1)
 	go func() {
@@ -76,14 +78,14 @@ func TestHandleClient(t *testing.T) {
 	// Wait for the greeting from the server
 	r := textproto.NewReader(bufio.NewReader(conn.Client))
 	line, _ := r.ReadLine()
-	fmt.Println(line)
+	//	fmt.Println(line)
 	w := textproto.NewWriter(bufio.NewWriter(conn.Client))
 	w.PrintfLine("HELO test.test.com")
 	line, _ = r.ReadLine()
-	fmt.Println(line)
+	//fmt.Println(line)
 	w.PrintfLine("QUIT")
 	line, _ = r.ReadLine()
-	fmt.Println("line is:", line)
+	//fmt.Println("line is:", line)
 	expected := "221 2.0.0 Bye"
 	if strings.Index(line, expected) != 0 {
 		t.Error("expected", expected, "but got:", line)

+ 12 - 7
tests/guerrilla_test.go

@@ -64,7 +64,7 @@ func init() {
 	} else {
 		setupCerts(config)
 		logger, _ = log.GetLogger(config.LogFile)
-		backend, _ := getBackend("dummy", config.BackendConfig, logger)
+		backend, _ := getBackend(config.BackendConfig, logger)
 		app, _ = guerrilla.New(&config.AppConfig, backend, logger)
 	}
 
@@ -75,9 +75,8 @@ var configJson = `
 {
     "log_file" : "./testlog",
     "log_level" : "debug",
-    "pid_file" : "/var/run/go-guerrilla.pid",
+    "pid_file" : "go-guerrilla.pid",
    "allowed_hosts": ["spam4.me","grr.la"],
-    "backend_name" : "dummy",
    "backend_config" :
        {
            "log_received_mails" : true
@@ -114,8 +113,13 @@ var configJson = `
 }
 `
 
-func getBackend(backendName string, backendConfig map[string]interface{}, l log.Logger) (backends.Backend, error) {
-	return backends.New(backendName, backendConfig, l)
+func getBackend(backendConfig map[string]interface{}, l log.Logger) (backends.Backend, error) {
+	b, err := backends.New(backendConfig, l)
+	if err != nil {
+		fmt.Println("backend init error", err)
+		os.Exit(1)
+	}
+	return b, err
 }
 
 func setupCerts(c *TestConfig) {
@@ -189,7 +193,6 @@ func TestGreeting(t *testing.T) {
 		t.FailNow()
 	}
 	if startErrors := app.Start(); startErrors == nil {
-
 		// 1. plaintext connection
 		conn, err := net.Dial("tcp", config.Servers[0].ListenInterface)
 		if err != nil {
@@ -237,6 +240,7 @@ func TestGreeting(t *testing.T) {
 		conn.Close()
 
 	} else {
+		fmt.Println("Nope", startErrors)
 		if startErrors := app.Start(); startErrors != nil {
 			t.Error(startErrors)
 			t.FailNow()
@@ -333,6 +337,7 @@ func TestRFC2821LimitRecipients(t *testing.T) {
 			}
 
 			for i := 0; i < 101; i++ {
+				//fmt.Println(fmt.Sprintf("RCPT TO:test%[email protected]", i))
 				if _, err := Command(conn, bufin, fmt.Sprintf("RCPT TO:test%[email protected]", i)); err != nil {
 				if _, err := Command(conn, bufin, fmt.Sprintf("RCPT TO:test%[email protected]", i)); err != nil {
 					t.Error("RCPT TO", err.Error())
 					t.Error("RCPT TO", err.Error())
 					break
 					break
@@ -1097,7 +1102,7 @@ func TestDataCommand(t *testing.T) {
 				bufin,
 				bufin,
 				email+"\r\n.\r\n")
 				email+"\r\n.\r\n")
 			//expected := "500 Line too long"
 			//expected := "500 Line too long"
-			expected := "250 2.0.0 OK : queued as s0m3l337Ha5hva1u3LOL"
+			expected := "250 2.0.0 OK : queued as "
 			if strings.Index(response, expected) != 0 {
 			if strings.Index(response, expected) != 0 {
 				t.Error("Server did not respond with", expected, ", it said:"+response, err)
 				t.Error("Server did not respond with", expected, ", it said:"+response, err)
 			}
 			}
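
The test helper reflects the refactored constructor: `backends.New` no longer takes a backend name; the backend is assembled from the config alone. A minimal sketch using only the config keys that appear in these tests (`log_received_mails`, `save_workers_size`); the log file path is illustrative:

```go
package main

import (
	"fmt"

	"github.com/flashmob/go-guerrilla/backends"
	"github.com/flashmob/go-guerrilla/log"
)

func main() {
	logger, _ := log.GetLogger("./testlog")
	// No backend name anymore: behaviour is driven by the config map.
	b, err := backends.New(
		backends.BackendConfig{"log_received_mails": true, "save_workers_size": 1},
		logger)
	if err != nil {
		fmt.Println("backend init error", err)
		return
	}
	_ = b // hand b to guerrilla.New(...) as in the init() above
}
```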

+ 4 - 4
util.go

@@ -5,14 +5,14 @@ import (
 	"regexp"
 	"strings"
 
-	"github.com/flashmob/go-guerrilla/envelope"
+	"github.com/flashmob/go-guerrilla/mail"
 	"github.com/flashmob/go-guerrilla/response"
 )
 
 var extractEmailRegex, _ = regexp.Compile(`<(.+?)@(.+?)>`) // go home regex, you're drunk!
 
-func extractEmail(str string) (envelope.EmailAddress, error) {
-	email := envelope.EmailAddress{}
+func extractEmail(str string) (mail.Address, error) {
+	email := mail.Address{}
 	var err error
 	if len(str) > RFC2821LimitPath {
 		return email, errors.New(response.Canned.FailPathTooLong)
@@ -21,7 +21,7 @@ func extractEmail(str string) (envelope.EmailAddress, error) {
 	email.User = matched[1]
 	email.Host = validHost(matched[2])
 } else if res := strings.Split(str, "@"); len(res) > 1 {
-		email.User = res[0]
+		email.User = strings.TrimSpace(res[0])
 		email.Host = validHost(res[1])
 	}
 	err = nil
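
The `strings.TrimSpace` change only affects the fallback branch, i.e. addresses given without angle brackets. Since `extractEmail` is unexported, a check like the following would have to live in the `guerrilla` package's own tests; the inputs are illustrative:

```go
// Illustrative only: extractEmail is unexported, so this sketch assumes it
// sits in a _test.go file inside the guerrilla package.
func TestExtractEmailTrimsUser(t *testing.T) {
	// fallback branch (no angle brackets): the user part is now trimmed
	addr, _ := extractEmail(" test@grr.la")
	if addr.User != "test" { // before this change it would be " test"
		t.Error("expected trimmed user, got:", addr.User)
	}
	// the bracketed form takes the regex path, unchanged by this commit
	addr, _ = extractEmail("<test@grr.la>")
	if addr.User != "test" || addr.Host != "grr.la" {
		t.Error("unexpected result:", addr.User, addr.Host)
	}
}
```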