// guerrilla_db_redis.go
  1. package backends
  2. // This backend is presented here as an example only, please modify it to your needs.
  3. // The backend stores the email data in Redis.
  4. // Other meta-information is stored in MySQL to be joined later.
  5. // A lot of email gets discarded without viewing on Guerrilla Mail,
  6. // so it's much faster to put in Redis, where other programs can
  7. // process it later, without touching the disk.
  8. // Short history:
  9. // Started with issuing an insert query for each single email and another query to update the tally
  10. // Then applied the following optimizations:
  11. // - Moved tally updates to another background process which does the tallying in a single query
  12. // - Changed the MySQL queries to insert in batch
  13. // - Made a Compressor that recycles buffers using sync.Pool
  14. // The result was around 400% speed improvement. If you know of any more improvements, please share!
import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/flashmob/go-guerrilla/envelope"
	"github.com/garyburd/redigo/redis"
	"github.com/ziutek/mymysql/autorc"
	_ "github.com/ziutek/mymysql/godrv"
)
// GuerrillaDBAndRedisBatchMax is how many rows to batch into a single
// multi-row INSERT before flushing to MySQL.
const GuerrillaDBAndRedisBatchMax = 500

// GuerrillaDBAndRedisBatchTimeout is how long the batcher waits with no new
// rows before flushing whatever has accumulated (tick on every...).
const GuerrillaDBAndRedisBatchTimeout = time.Second * 3
// init registers this backend under the name "guerrilla-db-redis" so it can
// be selected via the main configuration.
func init() {
	backends["guerrilla-db-redis"] = &AbstractBackend{
		extend: &GuerrillaDBAndRedisBackend{}}
}
// GuerrillaDBAndRedisBackend stores the email body in Redis and the
// meta-information in MySQL (see the history comment at the top of this file).
type GuerrillaDBAndRedisBackend struct {
	AbstractBackend
	config guerrillaDBAndRedisConfig
	// batcherWg lets the worker wait for insertQueryBatcher to drain on shutdown
	batcherWg sync.WaitGroup
	// cache prepared queries
	cache stmtCache
}
// stmtCache caches one prepared INSERT statement per batch size
// (1..GuerrillaDBAndRedisBatchMax). It's an array, not slice.
type stmtCache [GuerrillaDBAndRedisBatchMax]*autorc.Stmt
// guerrillaDBAndRedisConfig holds the backend settings, unmarshalled from the
// "backend_config" section of the main config file (see loadConfig).
type guerrillaDBAndRedisConfig struct {
	NumberOfWorkers int    `json:"save_workers_size"`
	MysqlTable      string `json:"mail_table"`
	MysqlDB         string `json:"mysql_db"`
	MysqlHost       string `json:"mysql_host"`
	MysqlPass       string `json:"mysql_pass"`
	MysqlUser       string `json:"mysql_user"`
	// how long message bodies live in Redis before expiring (SETEX)
	RedisExpireSeconds int    `json:"redis_expire_seconds"`
	RedisInterface     string `json:"redis_interface"`
	PrimaryHost        string `json:"primary_mail_host"`
}
// convertError wraps a config-field name into a standard "failed to load
// backend config" error.
func convertError(name string) error {
	return fmt.Errorf("failed to load backend config (%s)", name)
}
  59. // Load the backend config for the backend. It has already been unmarshalled
  60. // from the main config file 'backend' config "backend_config"
  61. // Now we need to convert each type and copy into the guerrillaDBAndRedisConfig struct
  62. func (g *GuerrillaDBAndRedisBackend) loadConfig(backendConfig BackendConfig) (err error) {
  63. configType := baseConfig(&guerrillaDBAndRedisConfig{})
  64. bcfg, err := g.extractConfig(backendConfig, configType)
  65. if err != nil {
  66. return err
  67. }
  68. m := bcfg.(*guerrillaDBAndRedisConfig)
  69. g.config = *m
  70. return nil
  71. }
// getNumberOfWorkers reports how many saveMailWorker goroutines to start,
// as configured by "save_workers_size".
func (g *GuerrillaDBAndRedisBackend) getNumberOfWorkers() int {
	return g.config.NumberOfWorkers
}
  75. func (g *GuerrillaDBAndRedisBackend) Process(mail *envelope.Envelope) BackendResult {
  76. to := mail.RcptTo
  77. log.Info("(g *GuerrillaDBAndRedisBackend) Process called")
  78. if len(to) == 0 {
  79. return NewBackendResult("554 Error: no recipient")
  80. }
  81. return nil
  82. }
// redisClient wraps a lazily-dialed redigo connection (see redisConnection).
type redisClient struct {
	// isConnected is set once Dial succeeds; never reset on later failures
	isConnected bool
	conn        redis.Conn
	time        int
}
// compressedData struct will be compressed using zlib when printed via fmt
// (it implements fmt.Stringer); scratch buffers are recycled via the pool.
type compressedData struct {
	extraHeaders []byte
	data         *bytes.Buffer
	pool         sync.Pool
}
  94. // newCompressedData returns a new CompressedData
  95. func newCompressedData() *compressedData {
  96. var p = sync.Pool{
  97. New: func() interface{} {
  98. var b bytes.Buffer
  99. return &b
  100. },
  101. }
  102. return &compressedData{
  103. pool: p,
  104. }
  105. }
// set stores the extra headers and the buffer of data to compress; the actual
// compression happens lazily in String().
func (c *compressedData) set(b []byte, d *bytes.Buffer) {
	c.extraHeaders = b
	c.data = d
}
  111. // implement Stringer interface
  112. func (c *compressedData) String() string {
  113. if c.data == nil {
  114. return ""
  115. }
  116. //borrow a buffer form the pool
  117. b := c.pool.Get().(*bytes.Buffer)
  118. // put back in the pool
  119. defer func() {
  120. b.Reset()
  121. c.pool.Put(b)
  122. }()
  123. var r *bytes.Reader
  124. w, _ := zlib.NewWriterLevel(b, zlib.BestSpeed)
  125. r = bytes.NewReader(c.extraHeaders)
  126. io.Copy(w, r)
  127. io.Copy(w, c.data)
  128. w.Close()
  129. return b.String()
  130. }
  131. // clear it, without clearing the pool
  132. func (c *compressedData) clear() {
  133. c.extraHeaders = []byte{}
  134. c.data = nil
  135. }
  136. // prepares the sql query with the number of rows that can be batched with it
  137. func (g *GuerrillaDBAndRedisBackend) prepareInsertQuery(rows int, db *autorc.Conn) *autorc.Stmt {
  138. if rows == 0 {
  139. panic("rows argument cannot be 0")
  140. }
  141. if g.cache[rows-1] != nil {
  142. return g.cache[rows-1]
  143. }
  144. sql := "INSERT INTO " + g.config.MysqlTable + " "
  145. sql += "(`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`, `hash`, `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)"
  146. sql += " values "
  147. values := "(NOW(), ?, ?, ?, ? , 'UTF-8' , ?, 0, ?, '', ?, 0, ?, ?, ?)"
  148. // add more rows
  149. comma := ""
  150. for i := 0; i < rows; i++ {
  151. sql += comma + values
  152. if comma == "" {
  153. comma = ","
  154. }
  155. }
  156. //log.Debug("Prepared SQL", rows, sql)
  157. stmt, sqlErr := db.Prepare(sql)
  158. if sqlErr != nil {
  159. log.WithError(sqlErr).Fatalf("failed while db.Prepare(INSERT...)")
  160. }
  161. // cache it
  162. g.cache[rows-1] = stmt
  163. return stmt
  164. }
  165. // Batches the rows from the feeder chan in to a single INSERT statement.
  166. // Execute the batches query when:
  167. // - number of batched rows reaches a threshold, i.e. count n = threshold
  168. // - or, no new rows within a certain time, i.e. times out
  169. func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(feeder chan []interface{}, db *autorc.Conn) {
  170. // controls shutdown
  171. defer g.batcherWg.Done()
  172. g.batcherWg.Add(1)
  173. // vals is where values are batched to
  174. var vals []interface{}
  175. // how many rows were batched
  176. count := 0
  177. // The timer will tick every second.
  178. // Interrupting the select clause when there's no data on the feeder channel
  179. t := time.NewTimer(GuerrillaDBAndRedisBatchTimeout)
  180. // prepare the query used to insert when rows reaches batchMax
  181. insertStmt := g.prepareInsertQuery(GuerrillaDBAndRedisBatchMax, db)
  182. // inserts executes a batched insert query, clears the vals and resets the count
  183. insert := func(c int) {
  184. if c > 0 {
  185. insertStmt = g.prepareInsertQuery(c, db)
  186. insertStmt.Bind(vals...)
  187. _, _, err := insertStmt.Exec()
  188. if err != nil {
  189. log.WithError(err).Error("There was a problem the insert")
  190. } else {
  191. //log.Debugf("Inserted %d rows ", count)
  192. }
  193. }
  194. vals = nil
  195. count = 0
  196. }
  197. // Keep getting values from feeder and add to batch.
  198. // if feeder times out, execute the batched query
  199. // otherwise, execute the batched query once it reaches the GuerrillaDBAndRedisBatchMax threshold
  200. for {
  201. select {
  202. case row := <-feeder:
  203. log.Info("row form chan is", row, "cols:", len(row))
  204. if row == nil {
  205. log.Debug("Query batchaer exiting")
  206. // Insert any remaining rows
  207. insert(count)
  208. return
  209. }
  210. vals = append(vals, row...)
  211. count++
  212. //log.Debug("apend vals", count, vals)
  213. if count == GuerrillaDBAndRedisBatchMax {
  214. insert(GuerrillaDBAndRedisBatchMax)
  215. }
  216. // stop timer from firing (reset the interrupt)
  217. if !t.Stop() {
  218. <-t.C
  219. }
  220. t.Reset(GuerrillaDBAndRedisBatchTimeout)
  221. case <-t.C:
  222. //log.Debugf("Query batcher timer fired! [%d]", len(vals))
  223. //log.Debug("Contents:", count, vals)
  224. // anything to insert?
  225. if n := len(vals); n > 0 {
  226. insert(count)
  227. }
  228. t.Reset(GuerrillaDBAndRedisBatchTimeout)
  229. }
  230. }
  231. }
  232. func (g *GuerrillaDBAndRedisBackend) saveMailWorker(saveMailChan chan *savePayload) {
  233. var to, body string
  234. //var length int
  235. //var err error
  236. var redisErr error
  237. redisClient := &redisClient{}
  238. db := autorc.New(
  239. "tcp",
  240. "",
  241. g.config.MysqlHost,
  242. g.config.MysqlUser,
  243. g.config.MysqlPass,
  244. g.config.MysqlDB)
  245. db.Register("set names utf8")
  246. // start the query SQL batching where we will send data via the feeder channel
  247. feeder := make(chan []interface{}, 1)
  248. go g.insertQueryBatcher(feeder, db)
  249. defer func() {
  250. if r := recover(); r != nil {
  251. //recover form closed channel
  252. fmt.Println("Recovered in f", r)
  253. }
  254. if db.Raw != nil {
  255. db.Raw.Close()
  256. }
  257. if redisClient.conn != nil {
  258. log.Infof("closed redis")
  259. redisClient.conn.Close()
  260. }
  261. // close the feeder & wait for query batcher to exit.
  262. close(feeder)
  263. g.batcherWg.Wait()
  264. }()
  265. var vals []interface{}
  266. data := newCompressedData()
  267. // receives values from the channel repeatedly until it is closed.
  268. for {
  269. payload := <-saveMailChan
  270. if payload == nil {
  271. log.Debug("No more saveMailChan payload")
  272. return
  273. }
  274. to = payload.recipient.User + "@" + g.config.PrimaryHost
  275. ts := fmt.Sprintf("%d", time.Now().UnixNano())
  276. payload.mail.ParseHeaders()
  277. hash := MD5Hex(
  278. to,
  279. payload.mail.MailFrom.String(),
  280. payload.mail.Subject,
  281. ts)
  282. // Add extra headers
  283. var addHead string
  284. addHead += "Delivered-To: " + to + "\r\n"
  285. addHead += "Received: from " + payload.mail.Helo + " (" + payload.mail.Helo + " [" + payload.mail.RemoteAddress + "])\r\n"
  286. addHead += " by " + payload.recipient.Host + " with SMTP id " + hash + "@" + payload.recipient.Host + ";\r\n"
  287. addHead += " " + time.Now().Format(time.RFC1123Z) + "\r\n"
  288. // data will be compressed when printed, with addHead added to beginning
  289. data.set([]byte(addHead), &payload.mail.Data)
  290. body = "gzencode"
  291. // data will be written to redis - it implements the Stringer interface, redigo uses fmt to
  292. // print the data to redis.
  293. redisErr = redisClient.redisConnection(g.config.RedisInterface)
  294. if redisErr == nil {
  295. _, doErr := redisClient.conn.Do("SETEX", hash, g.config.RedisExpireSeconds, data)
  296. if doErr == nil {
  297. //payload.mail.Data = ""
  298. //payload.mail.Data.Reset()
  299. body = "redis" // the backend system will know to look in redis for the message data
  300. data.clear() // blank
  301. }
  302. } else {
  303. log.WithError(redisErr).Warn("Error while SETEX on redis")
  304. }
  305. vals = []interface{}{} // clear the vals
  306. vals = append(vals,
  307. to,
  308. payload.mail.MailFrom.String(),
  309. payload.mail.Subject,
  310. body,
  311. data.String(),
  312. hash,
  313. to,
  314. payload.mail.RemoteAddress,
  315. payload.mail.MailFrom.String(),
  316. payload.mail.TLS)
  317. feeder <- vals
  318. payload.savedNotify <- &saveStatus{nil, hash}
  319. }
  320. }
  321. func (c *redisClient) redisConnection(redisInterface string) (err error) {
  322. if c.isConnected == false {
  323. c.conn, err = redis.Dial("tcp", redisInterface)
  324. if err != nil {
  325. // handle error
  326. return err
  327. }
  328. c.isConnected = true
  329. }
  330. return nil
  331. }
  332. // test database connection settings
  333. func (g *GuerrillaDBAndRedisBackend) testSettings() (err error) {
  334. db := autorc.New(
  335. "tcp",
  336. "",
  337. g.config.MysqlHost,
  338. g.config.MysqlUser,
  339. g.config.MysqlPass,
  340. g.config.MysqlDB)
  341. if mysqlErr := db.Raw.Connect(); mysqlErr != nil {
  342. err = fmt.Errorf("MySql cannot connect, check your settings: %s", mysqlErr)
  343. } else {
  344. db.Raw.Close()
  345. }
  346. redisClient := &redisClient{}
  347. if redisErr := redisClient.redisConnection(g.config.RedisInterface); redisErr != nil {
  348. err = fmt.Errorf("Redis cannot connect, check your settings: %s", redisErr)
  349. }
  350. return
  351. }