// guerrilla_db_redis.go
  1. package backends
  2. // This backend is presented here as an example only, please modify it to your needs.
  3. // The backend stores the email data in Redis.
  4. // Other meta-information is stored in MySQL to be joined later.
  5. // A lot of email gets discarded without viewing on Guerrilla Mail,
  6. // so it's much faster to put in Redis, where other programs can
  7. // process it later, without touching the disk.
  8. //
  9. // Some features:
  10. // - It batches the SQL inserts into a single query and inserts either after a time threshold or if the batch is full
  11. // - If the mysql driver crashes, it's able to recover, log the incident and resume again.
  12. // - It also does a clean shutdown - it tries to save everything before returning
  13. //
  14. // Short history:
  15. // Started with issuing an insert query for each single email and another query to update the tally
  16. // Then applied the following optimizations:
  17. // - Moved tally updates to another background process which does the tallying in a single query
  18. // - Changed the MySQL queries to insert in batch
  19. // - Made a Compressor that recycles buffers using sync.Pool
  20. // The result was around 400% speed improvement. If you know of any more improvements, please share!
// - Added the recovery mechanism.
  22. import (
  23. "fmt"
  24. "time"
  25. "github.com/garyburd/redigo/redis"
  26. "bytes"
  27. "compress/zlib"
  28. "database/sql"
  29. _ "github.com/go-sql-driver/mysql"
  30. "github.com/go-sql-driver/mysql"
  31. "io"
  32. "runtime/debug"
  33. "strings"
  34. "sync"
  35. )
  36. // how many rows to batch at a time
  37. const GuerrillaDBAndRedisBatchMax = 2
  38. // tick on every...
  39. const GuerrillaDBAndRedisBatchTimeout = time.Second * 3
// init registers this backend in the package-level backends map under the
// name "guerrilla-db-redis", wrapped in an AbstractBackend so the generic
// backend machinery can drive it.
func init() {
	backends["guerrilla-db-redis"] = &AbstractBackend{
		extend: &GuerrillaDBAndRedisBackend{}}
}
// GuerrillaDBAndRedisBackend stores the compressed email body in Redis and
// the meta-information in MySQL, batching the SQL inserts for speed.
type GuerrillaDBAndRedisBackend struct {
	AbstractBackend
	// config holds the values loaded from the "backend_config" section.
	config guerrillaDBAndRedisConfig
	// batcherWg tracks the insertQueryBatcher goroutines so a clean
	// shutdown can wait for them to finish.
	batcherWg sync.WaitGroup
	// cache prepared queries
	cache stmtCache
}

// statement cache. It's an array, not slice
// (indexed by batch-row-count minus one).
type stmtCache [GuerrillaDBAndRedisBatchMax]*sql.Stmt
// guerrillaDBAndRedisConfig is the typed form of this backend's JSON config.
type guerrillaDBAndRedisConfig struct {
	// NumberOfWorkers is how many saveMailWorker goroutines to run.
	NumberOfWorkers int `json:"save_workers_size"`
	// MysqlTable is the table the batched INSERTs target.
	MysqlTable string `json:"mail_table"`
	MysqlDB    string `json:"mysql_db"`
	MysqlHost  string `json:"mysql_host"`
	MysqlPass  string `json:"mysql_pass"`
	MysqlUser  string `json:"mysql_user"`
	// RedisExpireSeconds is the SETEX TTL applied to each stored message.
	RedisExpireSeconds int `json:"redis_expire_seconds"`
	// RedisInterface is the host:port Redis is dialed on (tcp).
	RedisInterface string `json:"redis_interface"`
	// PrimaryHost is appended to the recipient user to form the "to" address.
	PrimaryHost string `json:"primary_mail_host"`
}
  64. func convertError(name string) error {
  65. return fmt.Errorf("failed to load backend config (%s)", name)
  66. }
  67. // Load the backend config for the backend. It has already been unmarshalled
  68. // from the main config file 'backend' config "backend_config"
  69. // Now we need to convert each type and copy into the guerrillaDBAndRedisConfig struct
  70. func (g *GuerrillaDBAndRedisBackend) loadConfig(backendConfig BackendConfig) (err error) {
  71. configType := baseConfig(&guerrillaDBAndRedisConfig{})
  72. bcfg, err := g.extractConfig(backendConfig, configType)
  73. if err != nil {
  74. return err
  75. }
  76. m := bcfg.(*guerrillaDBAndRedisConfig)
  77. g.config = *m
  78. return nil
  79. }
  80. func (g *GuerrillaDBAndRedisBackend) getNumberOfWorkers() int {
  81. return g.config.NumberOfWorkers
  82. }
// redisClient wraps a lazily-dialed redigo connection.
type redisClient struct {
	// isConnected guards against re-dialing once a connection is up.
	isConnected bool
	conn        redis.Conn
	// time is unused in this file — presumably a timestamp; confirm
	// against other users before removing.
	time int
}
  88. // compressedData struct will be compressed using zlib when printed via fmt
  89. type compressedData struct {
  90. extraHeaders []byte
  91. data *bytes.Buffer
  92. pool *sync.Pool
  93. }
  94. // newCompressedData returns a new CompressedData
  95. func newCompressedData() *compressedData {
  96. var p = sync.Pool{
  97. New: func() interface{} {
  98. var b bytes.Buffer
  99. return &b
  100. },
  101. }
  102. return &compressedData{
  103. pool: &p,
  104. }
  105. }
  106. // Set the extraheaders and buffer of data to compress
  107. func (c *compressedData) set(b []byte, d *bytes.Buffer) {
  108. c.extraHeaders = b
  109. c.data = d
  110. }
  111. // implement Stringer interface
  112. func (c *compressedData) String() string {
  113. if c.data == nil {
  114. return ""
  115. }
  116. //borrow a buffer form the pool
  117. b := c.pool.Get().(*bytes.Buffer)
  118. // put back in the pool
  119. defer func() {
  120. b.Reset()
  121. c.pool.Put(b)
  122. }()
  123. var r *bytes.Reader
  124. w, _ := zlib.NewWriterLevel(b, zlib.BestSpeed)
  125. r = bytes.NewReader(c.extraHeaders)
  126. io.Copy(w, r)
  127. io.Copy(w, c.data)
  128. w.Close()
  129. return b.String()
  130. }
  131. // clear it, without clearing the pool
  132. func (c *compressedData) clear() {
  133. c.extraHeaders = []byte{}
  134. c.data = nil
  135. }
  136. // prepares the sql query with the number of rows that can be batched with it
  137. func (g *GuerrillaDBAndRedisBackend) prepareInsertQuery(rows int, db *sql.DB) *sql.Stmt {
  138. if rows == 0 {
  139. panic("rows argument cannot be 0")
  140. }
  141. if g.cache[rows-1] != nil {
  142. return g.cache[rows-1]
  143. }
  144. sqlstr := "INSERT INTO " + g.config.MysqlTable + " "
  145. sqlstr += "(`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`, `hash`, `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)"
  146. sqlstr += " values "
  147. values := "(NOW(), ?, ?, ?, ? , 'UTF-8' , ?, 0, ?, '', ?, 0, ?, ?, ?)"
  148. // add more rows
  149. comma := ""
  150. for i := 0; i < rows; i++ {
  151. sqlstr += comma + values
  152. if comma == "" {
  153. comma = ","
  154. }
  155. }
  156. stmt, sqlErr := db.Prepare(sqlstr)
  157. if sqlErr != nil {
  158. mainlog.WithError(sqlErr).Fatalf("failed while db.Prepare(INSERT...)")
  159. }
  160. // cache it
  161. g.cache[rows-1] = stmt
  162. return stmt
  163. }
// doQuery executes a batched INSERT of c rows using the values in vals.
// If the underlying driver panics (e.g. the mysql driver crashing), the
// panic is logged together with the approximate payload size, then
// re-raised as panic("query failed") so the caller's own recovery logic
// can log the incident and restart the batcher.
func (g *GuerrillaDBAndRedisBackend) doQuery(c int, db *sql.DB, insertStmt *sql.Stmt, vals *[]interface{}) {
	var execErr error
	defer func() {
		if r := recover(); r != nil {
			//logln(1, fmt.Sprintf("Recovered in %v", r))
			mainlog.Error("Recovered form panic:", r, string(debug.Stack()))
			// sum the string lengths to give a rough idea of how large the
			// failed payload was
			sum := 0
			for _, v := range *vals {
				if str, ok := v.(string); ok {
					sum = sum + len(str)
				}
			}
			mainlog.Errorf("panic while inserting query [%s] size:%d, err %v", r, sum, execErr)
			panic("query failed")
		}
	}()
	// prepare the query used to insert when rows reaches batchMax
	// NOTE(review): this overwrites the insertStmt argument, so whatever
	// statement the caller passed in is never used — confirm intent.
	insertStmt = g.prepareInsertQuery(c, db)
	_, execErr = insertStmt.Exec(*vals...)
	if execErr != nil {
		mainlog.WithError(execErr).Error("There was a problem the insert")
	}
}
// Batches the rows from the feeder chan in to a single INSERT statement.
// Execute the batches query when:
// - number of batched rows reaches a threshold, i.e. count n = threshold
// - or, no new rows within a certain time, i.e. times out
// The goroutine can either exit if there's a panic or feeder channel closes
// it returns feederOk which signals if the feeder chanel was ok (still open) while returning
// if it feederOk is false, then it means the feeder chanel is closed
func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(feeder chan []interface{}, db *sql.DB) (feederOk bool) {
	// controls shutdown
	defer g.batcherWg.Done()
	// NOTE(review): Add is called here, inside the batcher goroutine, rather
	// than before it is started — if the caller's batcherWg.Wait() runs
	// before this line executes, Wait can return early. Confirm against the
	// caller (saveMailWorker) that this window is acceptable.
	g.batcherWg.Add(1)
	// vals is where values are batched to
	var vals []interface{}
	// how many rows were batched
	count := 0
	// The timer will tick every second.
	// Interrupting the select clause when there's no data on the feeder channel
	t := time.NewTimer(GuerrillaDBAndRedisBatchTimeout)
	// prepare the query used to insert when rows reaches batchMax
	// NOTE(review): this prepared statement is passed to doQuery below, but
	// doQuery re-prepares its own — see the note there.
	insertStmt := g.prepareInsertQuery(GuerrillaDBAndRedisBatchMax, db)
	// inserts executes a batched insert query, clears the vals and resets the count
	insert := func(c int) {
		if c > 0 {
			g.doQuery(c, db, insertStmt, &vals)
		}
		vals = nil
		count = 0
	}
	// doQuery re-panics on driver failure; this recover makes the batcher
	// return (with feederOk still true) so the caller can restart it.
	defer func() {
		if r := recover(); r != nil {
			mainlog.Error("insertQueryBatcher caught a panic", r)
		}
	}()
	// Keep getting values from feeder and add to batch.
	// if feeder times out, execute the batched query
	// otherwise, execute the batched query once it reaches the GuerrillaDBAndRedisBatchMax threshold
	feederOk = true
	for {
		select {
		// it may panic when reading on a closed feeder channel. feederOK detects if it was closed
		// NOTE(review): the := here shadows the named return value inside this
		// case; the `return feederOk` below returns the shadowed copy (false
		// when the channel is closed), which appears to be the intent.
		case row, feederOk := <-feeder:
			if row == nil {
				mainlog.Info("Query batchaer exiting")
				// Insert any remaining rows
				insert(count)
				return feederOk
			}
			vals = append(vals, row...)
			count++
			mainlog.Debug("new feeder row:", row, " cols:", len(row), " count:", count, " worker", workerId)
			if count >= GuerrillaDBAndRedisBatchMax {
				insert(GuerrillaDBAndRedisBatchMax)
			}
			// stop timer from firing (reset the interrupt)
			if !t.Stop() {
				// drain the channel if the timer already fired
				<-t.C
			}
			t.Reset(GuerrillaDBAndRedisBatchTimeout)
		case <-t.C:
			// timed out with no new rows: anything to insert?
			if n := len(vals); n > 0 {
				insert(count)
			}
			t.Reset(GuerrillaDBAndRedisBatchTimeout)
		}
	}
}
  254. func trimToLimit(str string, limit int) string {
  255. ret := strings.TrimSpace(str)
  256. if len(str) > limit {
  257. ret = str[:limit]
  258. }
  259. return ret
  260. }
// workerId numbers the saveMailWorker goroutines for log messages.
// NOTE(review): incremented without synchronization in saveMailWorker —
// racy if workers are started concurrently; confirm workers start serially.
var workerId = 0
  262. func (g *GuerrillaDBAndRedisBackend) mysqlConnect() (*sql.DB, error) {
  263. conf := mysql.Config{
  264. User: g.config.MysqlUser,
  265. Passwd: g.config.MysqlPass,
  266. DBName: g.config.MysqlDB,
  267. Net: "tcp",
  268. Addr: g.config.MysqlHost,
  269. ReadTimeout: GuerrillaDBAndRedisBatchTimeout + (time.Second * 10),
  270. WriteTimeout: GuerrillaDBAndRedisBatchTimeout + (time.Second * 10),
  271. Params: map[string]string{"collation": "utf8_general_ci"},
  272. }
  273. if db, err := sql.Open("mysql", conf.FormatDSN()); err != nil {
  274. mainlog.Error("cannot open mysql", err)
  275. return nil, err
  276. } else {
  277. return db, nil
  278. }
  279. }
// saveMailWorker receives mail payloads on saveMailChan until it is closed
// (or a nil payload arrives). For each mail it writes the zlib-compressed
// message (extra headers + body) to Redis keyed by an MD5 hash, and feeds
// the meta-information to the SQL insert batcher. On exit it closes the
// MySQL pool, the Redis connection and the feeder channel, then waits for
// the batcher to drain.
func (g *GuerrillaDBAndRedisBackend) saveMailWorker(saveMailChan chan *savePayload) {
	var to, body string
	var redisErr error
	// NOTE(review): unsynchronized increment of a package-level counter —
	// confirm workers are started one at a time.
	workerId++
	redisClient := &redisClient{}
	var db *sql.DB
	var err error
	db, err = g.mysqlConnect()
	if err != nil {
		mainlog.Fatalf("cannot open mysql: %s", err)
	}
	// start the query SQL batching where we will send data via the feeder channel
	feeder := make(chan []interface{}, 1)
	go func() {
		for {
			// a false return means the feeder channel closed: normal shutdown
			if feederOK := g.insertQueryBatcher(feeder, db); !feederOK {
				mainlog.Debug("insertQueryBatcher exited")
				return
			}
			// if insertQueryBatcher panics, it can recover and go in again
			mainlog.Debug("resuming insertQueryBatcher")
		}
	}()
	defer func() {
		if r := recover(); r != nil {
			//recover form closed channel
			mainlog.Error("panic recovered in saveMailWorker", r)
		}
		db.Close()
		if redisClient.conn != nil {
			mainlog.Infof("closed redis")
			redisClient.conn.Close()
		}
		// close the feeder & wait for query batcher to exit.
		close(feeder)
		g.batcherWg.Wait()
	}()
	var vals []interface{}
	data := newCompressedData()
	// receives values from the channel repeatedly until it is closed.
	for {
		payload := <-saveMailChan
		// a nil payload (or closed channel) signals shutdown
		if payload == nil {
			mainlog.Debug("No more saveMailChan payload")
			return
		}
		mainlog.Debug("Got mail from chan", payload.mail.RemoteAddress)
		to = trimToLimit(strings.TrimSpace(payload.recipient.User)+"@"+g.config.PrimaryHost, 255)
		payload.mail.Helo = trimToLimit(payload.mail.Helo, 255)
		payload.recipient.Host = trimToLimit(payload.recipient.Host, 255)
		ts := fmt.Sprintf("%d", time.Now().UnixNano())
		payload.mail.ParseHeaders()
		// the hash doubles as the Redis key and the SMTP id in the
		// Received header below
		hash := MD5Hex(
			to,
			payload.mail.MailFrom.String(),
			payload.mail.Subject,
			ts)
		// Add extra headers
		var addHead string
		addHead += "Delivered-To: " + to + "\r\n"
		addHead += "Received: from " + payload.mail.Helo + " (" + payload.mail.Helo + " [" + payload.mail.RemoteAddress + "])\r\n"
		addHead += " by " + payload.recipient.Host + " with SMTP id " + hash + "@" + payload.recipient.Host + ";\r\n"
		addHead += " " + time.Now().Format(time.RFC1123Z) + "\r\n"
		// data will be compressed when printed, with addHead added to beginning
		data.set([]byte(addHead), &payload.mail.Data)
		// "gzencode" marks the body as stored inline (compressed) in MySQL;
		// overwritten with "redis" below when the Redis write succeeds
		body = "gzencode"
		// data will be written to redis - it implements the Stringer interface, redigo uses fmt to
		// print the data to redis.
		redisErr = redisClient.redisConnection(g.config.RedisInterface)
		if redisErr == nil {
			_, doErr := redisClient.conn.Do("SETEX", hash, g.config.RedisExpireSeconds, data)
			if doErr == nil {
				body = "redis" // the backend system will know to look in redis for the message data
				data.clear()   // blank, so data.String() below stores "" in MySQL
			}
		} else {
			mainlog.WithError(redisErr).Warn("Error while connecting redis")
		}
		vals = []interface{}{} // clear the vals
		// order must match the placeholders built in prepareInsertQuery
		vals = append(vals,
			trimToLimit(to, 255),
			trimToLimit(payload.mail.MailFrom.String(), 255),
			trimToLimit(payload.mail.Subject, 255),
			body,
			data.String(),
			hash,
			trimToLimit(to, 255),
			payload.mail.RemoteAddress,
			trimToLimit(payload.mail.MailFrom.String(), 255),
			payload.mail.TLS)
		feeder <- vals
		payload.savedNotify <- &saveStatus{nil, hash}
	}
}
  374. func (c *redisClient) redisConnection(redisInterface string) (err error) {
  375. if c.isConnected == false {
  376. c.conn, err = redis.Dial("tcp", redisInterface)
  377. if err != nil {
  378. // handle error
  379. return err
  380. }
  381. c.isConnected = true
  382. }
  383. return nil
  384. }
// test database connection settings
// testSettings checks both the MySQL and the Redis connection settings,
// returning an error describing whichever check failed.
// NOTE(review): when both checks fail, the Redis error overwrites the
// MySQL one, so only the Redis problem is reported — confirm acceptable.
func (g *GuerrillaDBAndRedisBackend) testSettings() (err error) {
	var db *sql.DB
	if db, err = g.mysqlConnect(); err != nil {
		err = fmt.Errorf("MySql cannot connect, check your settings: %s", err)
	} else {
		db.Close()
	}
	redisClient := &redisClient{}
	if redisErr := redisClient.redisConnection(g.config.RedisInterface); redisErr != nil {
		err = fmt.Errorf("Redis cannot connect, check your settings: %s", redisErr)
	}
	return
}