p_guerrilla_db_redis.go

package backends

import (
	"bytes"
	"compress/zlib"
	"database/sql"
	"fmt"
	"io"
	"math/rand"
	"runtime/debug"
	"strings"
	"sync"
	"time"

	"github.com/flashmob/go-guerrilla/mail"
	"github.com/garyburd/redigo/redis"
	"github.com/go-sql-driver/mysql"
)
// ----------------------------------------------------------------------------------
// Processor Name: GuerrillaRedisDB
// ----------------------------------------------------------------------------------
// Description   : Saves the message body to Redis and the metadata to MySQL.
//               : Example only. Limitation: it doesn't save multiple recipients,
//               : nor does it validate them.
// ----------------------------------------------------------------------------------
// Config Options: ...
// --------------:-------------------------------------------------------------------
// Input         : envelope
// ----------------------------------------------------------------------------------
// Output        :
// ----------------------------------------------------------------------------------
func init() {
	processors["guerrillaredisdb"] = func() Decorator {
		return GuerrillaDbReddis()
	}
}
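
// Note: init registers this processor under the name "guerrillaredisdb". In a typical
// go-guerrilla setup it would then be enabled by listing that name in the backend's
// processing chain (e.g. the "save_process" setting). The exact config key may vary
// between versions, so treat this as an illustrative pointer rather than a reference.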

var queryBatcherId = 0

// how many rows to batch at a time
const GuerrillaDBAndRedisBatchMax = 50

// how long to wait before an incomplete batch is flushed
const GuerrillaDBAndRedisBatchTimeout = time.Second * 3

type GuerrillaDBAndRedisBackend struct {
	config    *guerrillaDBAndRedisConfig
	batcherWg sync.WaitGroup
	// cache prepared queries
	cache           stmtCache
	batcherStoppers []chan bool
}

// statement cache. It's an array, not a slice
type stmtCache [GuerrillaDBAndRedisBatchMax]*sql.Stmt

type guerrillaDBAndRedisConfig struct {
	NumberOfWorkers    int    `json:"save_workers_size"`
	MysqlTable         string `json:"mail_table"`
	MysqlDB            string `json:"mysql_db"`
	MysqlHost          string `json:"mysql_host"`
	MysqlPass          string `json:"mysql_pass"`
	MysqlUser          string `json:"mysql_user"`
	RedisExpireSeconds int    `json:"redis_expire_seconds"`
	RedisInterface     string `json:"redis_interface"`
	PrimaryHost        string `json:"primary_mail_host"`
	BatchTimeout       int    `json:"redis_mysql_batch_timeout,omitempty"`
}
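
// For illustration only: a "backend_config" fragment wired to the json tags above.
// The field names come straight from the struct tags; the values are example
// placeholders, not defaults.
//
//	"backend_config": {
//	    "save_workers_size": 1,
//	    "mail_table": "new_mail",
//	    "mysql_db": "guerrilla_mail",
//	    "mysql_host": "127.0.0.1:3306",
//	    "mysql_user": "root",
//	    "mysql_pass": "secret",
//	    "redis_expire_seconds": 7200,
//	    "redis_interface": "127.0.0.1:6379",
//	    "primary_mail_host": "example.com",
//	    "redis_mysql_batch_timeout": 2
//	}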

// loadConfig loads the backend's configuration. The values have already been
// unmarshalled from the "backend_config" section of the main config file;
// here we convert each field and copy it into the guerrillaDBAndRedisConfig struct.
func (g *GuerrillaDBAndRedisBackend) loadConfig(backendConfig BackendConfig) (err error) {
	configType := BaseConfig(&guerrillaDBAndRedisConfig{})
	bcfg, err := Svc.ExtractConfig(backendConfig, configType)
	if err != nil {
		return err
	}
	m := bcfg.(*guerrillaDBAndRedisConfig)
	g.config = m
	return nil
}

func (g *GuerrillaDBAndRedisBackend) getNumberOfWorkers() int {
	return g.config.NumberOfWorkers
}

type redisClient struct {
	isConnected bool
	conn        redis.Conn
	time        int
}

// compressedData will be compressed using zlib when printed via fmt
type compressedData struct {
	extraHeaders []byte
	data         *bytes.Buffer
	pool         *sync.Pool
}

// newCompressedData returns a new compressedData
func newCompressedData() *compressedData {
	var p = sync.Pool{
		New: func() interface{} {
			var b bytes.Buffer
			return &b
		},
	}
	return &compressedData{
		pool: &p,
	}
}

// set the extra headers and the buffer of data to compress
func (c *compressedData) set(b []byte, d *bytes.Buffer) {
	c.extraHeaders = b
	c.data = d
}

// String implements the fmt.Stringer interface: it returns the zlib-compressed
// concatenation of the extra headers and the message data
func (c *compressedData) String() string {
	if c.data == nil {
		return ""
	}
	// borrow a buffer from the pool
	b := c.pool.Get().(*bytes.Buffer)
	// put it back in the pool when done
	defer func() {
		b.Reset()
		c.pool.Put(b)
	}()
	var r *bytes.Reader
	w, _ := zlib.NewWriterLevel(b, zlib.BestSpeed)
	r = bytes.NewReader(c.extraHeaders)
	io.Copy(w, r)
	io.Copy(w, c.data)
	w.Close()
	return b.String()
}

// clear it, without clearing the pool
func (c *compressedData) clear() {
	c.extraHeaders = []byte{}
	c.data = nil
}
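
// Typical lifecycle, as used by the processor further below (a descriptive note, not
// part of the original docs): set() stages the extra headers and the message buffer,
// String() produces the zlib-compressed payload on demand (redigo prints the value
// via fmt when writing it to Redis), and clear() drops the references so the same
// compressedData can be reused for the next envelope.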

// prepareInsertQuery builds and prepares the SQL insert statement for a batch of the given number of rows
func (g *GuerrillaDBAndRedisBackend) prepareInsertQuery(rows int, db *sql.DB) *sql.Stmt {
	if rows == 0 {
		panic("rows argument cannot be 0")
	}
	if g.cache[rows-1] != nil {
		return g.cache[rows-1]
	}
	sqlstr := "INSERT INTO " + g.config.MysqlTable + " "
	sqlstr += "(`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`, `hash`, `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)"
	sqlstr += " values "
	values := "(NOW(), ?, ?, ?, ? , 'UTF-8' , ?, 0, ?, '', ?, 0, ?, ?, ?)"
	// add a value tuple for each row, separated by commas
	comma := ""
	for i := 0; i < rows; i++ {
		sqlstr += comma + values
		if comma == "" {
			comma = ","
		}
	}
	stmt, sqlErr := db.Prepare(sqlstr)
	if sqlErr != nil {
		Log().WithError(sqlErr).Fatalf("failed while db.Prepare(INSERT...)")
	}
	// cache it
	g.cache[rows-1] = stmt
	return stmt
}
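
// For illustration, prepareInsertQuery(2, db) prepares a statement of roughly this
// shape (mail_table comes from the config; whitespace compacted here for readability):
//
//	INSERT INTO mail_table
//	  (`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`,
//	   `hash`, `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)
//	 values
//	  (NOW(), ?, ?, ?, ?, 'UTF-8', ?, 0, ?, '', ?, 0, ?, ?, ?),
//	  (NOW(), ?, ?, ?, ?, 'UTF-8', ?, 0, ?, '', ?, 0, ?, ?, ?)
//
// i.e. 10 placeholders per batched row.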

// doQuery prepares the insert statement for c rows and executes it with the batched values.
// If the execution panics (e.g. a broken connection), the panic is logged and re-raised.
func (g *GuerrillaDBAndRedisBackend) doQuery(c int, db *sql.DB, insertStmt *sql.Stmt, vals *[]interface{}) error {
	var execErr error
	defer func() {
		if r := recover(); r != nil {
			//logln(1, fmt.Sprintf("Recovered in %v", r))
			Log().Error("Recovered from panic:", r, string(debug.Stack()))
			sum := 0
			for _, v := range *vals {
				if str, ok := v.(string); ok {
					sum = sum + len(str)
				}
			}
			Log().Errorf("panic while inserting query [%s] size:%d, err %v", r, sum, execErr)
			panic("query failed")
		}
	}()
	// prepare the query used to insert when rows reaches batchMax
	insertStmt = g.prepareInsertQuery(c, db)
	_, execErr = insertStmt.Exec(*vals...)
	//if rand.Intn(2) == 1 {
	//	return errors.New("uggabooka")
	//}
	if execErr != nil {
		Log().WithError(execErr).Error("There was a problem with the insert")
	}
	return execErr
}

// insertQueryBatcher batches the rows from the feeder chan into a single INSERT statement.
// The batched query is executed when:
// - the number of batched rows reaches a threshold (GuerrillaDBAndRedisBatchMax), or
// - no new rows arrive within a certain time, i.e. it times out.
// The goroutine exits either after recovering from a panic or after a stop signal.
// It returns feederOk, which signals whether the feeder channel is still usable on return;
// if feederOk is false, the feeder channel has been closed.
func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(
	feeder feedChan,
	db *sql.DB,
	batcherId int,
	stop chan bool) (feederOk bool) {

	// controls shutdown
	defer g.batcherWg.Done()
	g.batcherWg.Add(1)
	// vals is where values are batched to
	var vals []interface{}
	// how many rows were batched
	count := 0
	// The timer will tick every x seconds,
	// interrupting the select clause when there's no data on the feeder channel
	timeo := GuerrillaDBAndRedisBatchTimeout
	if g.config.BatchTimeout > 0 {
		// the configured value is a number of seconds
		timeo = time.Duration(g.config.BatchTimeout) * time.Second
	}
	t := time.NewTimer(timeo)
	// prepare the query used to insert when rows reaches batchMax
	insertStmt := g.prepareInsertQuery(GuerrillaDBAndRedisBatchMax, db)
	// inserter executes a batched insert query, clears the vals and resets the count
	inserter := func(c int) {
		if c > 0 {
			err := g.doQuery(c, db, insertStmt, &vals)
			if err != nil {
				// maybe a connection problem?
				// retry the sql query a few times
				attempts := 3
				for i := 0; i < attempts; i++ {
					Log().Infof("retrying query, rows[%d]", c)
					time.Sleep(time.Second)
					err = g.doQuery(c, db, insertStmt, &vals)
					if err == nil {
						break
					}
				}
			}
		}
		vals = nil
		count = 0
	}
	rand.Seed(time.Now().UnixNano())
	defer func() {
		if r := recover(); r != nil {
			Log().Error("insertQueryBatcher caught a panic", r, string(debug.Stack()))
		}
	}()
	// Keep getting values from the feeder and adding them to the batch.
	// If the feeder times out, execute the batched query;
	// otherwise, execute it once the batch reaches the GuerrillaDBAndRedisBatchMax threshold
	feederOk = true
	for {
		select {
		case <-stop:
			// stop signal: flush whatever is batched, mark the feeder as closed and exit
			Log().Infof("MySQL query batcher stopped (#%d)", batcherId)
			// insert any remaining rows
			inserter(count)
			feederOk = false
			close(feeder)
			return
		case row := <-feeder:
			vals = append(vals, row...)
			count++
			Log().Debug("new feeder row:", row, " cols:", len(row), " count:", count, " worker", batcherId)
			if count >= GuerrillaDBAndRedisBatchMax {
				inserter(GuerrillaDBAndRedisBatchMax)
			}
			// stop the timer from firing (reset the interrupt)
			if !t.Stop() {
				// drain the timer
				<-t.C
			}
			t.Reset(timeo)
		case <-t.C:
			// anything to insert?
			if n := len(vals); n > 0 {
				inserter(count)
			}
			t.Reset(timeo)
		}
	}
}

// trimToLimit trims surrounding whitespace and truncates the string to at most limit bytes
func trimToLimit(str string, limit int) string {
	ret := strings.TrimSpace(str)
	if len(ret) > limit {
		ret = ret[:limit]
	}
	return ret
}

func (g *GuerrillaDBAndRedisBackend) mysqlConnect() (*sql.DB, error) {
	// note: tOut ends up being used as a count of seconds (multiplied by time.Second below)
	tOut := GuerrillaDBAndRedisBatchTimeout
	if g.config.BatchTimeout > 0 {
		tOut = time.Duration(g.config.BatchTimeout)
	}
	tOut += 10
	// don't go to 30 sec or more
	if tOut >= 30 {
		tOut = 29
	}
	conf := mysql.Config{
		User:         g.config.MysqlUser,
		Passwd:       g.config.MysqlPass,
		DBName:       g.config.MysqlDB,
		Net:          "tcp",
		Addr:         g.config.MysqlHost,
		ReadTimeout:  tOut * time.Second,
		WriteTimeout: tOut * time.Second,
		Params:       map[string]string{"collation": "utf8_general_ci"},
	}
	if db, err := sql.Open("mysql", conf.FormatDSN()); err != nil {
		Log().Error("cannot open mysql", err)
		return nil, err
	} else {
		// do we have access to the table?
		rows, err := db.Query("SELECT mail_id FROM " + g.config.MysqlTable + " LIMIT 1")
		if err != nil {
			Log().Error("cannot select table", err)
			return nil, err
		}
		rows.Close()
		return db, nil
	}
}

// redisConnection dials Redis, unless the client is already connected
func (c *redisClient) redisConnection(redisInterface string) (err error) {
	if !c.isConnected {
		c.conn, err = redis.Dial("tcp", redisInterface)
		if err != nil {
			return err
		}
		c.isConnected = true
	}
	return nil
}

type feedChan chan []interface{}
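
// Each value sent on a feedChan is one flattened row of the 10 column values expected
// by the INSERT placeholders built in prepareInsertQuery, in this order:
// to, from, subject, body, mail (compressed data), hash, recipient, ip_addr,
// return_path, is_tls. The batcher appends these slices together and flushes them
// as one multi-row INSERT.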

// GuerrillaDbReddis is a specialized processor for Guerrilla mail, included here as an
// example of a 'monolithic' processor.
func GuerrillaDbReddis() Decorator {
	g := GuerrillaDBAndRedisBackend{}
	redisClient := &redisClient{}
	var db *sql.DB
	var to, body string
	var redisErr error
	var feeders []feedChan

	g.batcherStoppers = make([]chan bool, 0)

	Svc.AddInitializer(InitializeWith(func(backendConfig BackendConfig) error {
		configType := BaseConfig(&guerrillaDBAndRedisConfig{})
		bcfg, err := Svc.ExtractConfig(backendConfig, configType)
		if err != nil {
			return err
		}
		g.config = bcfg.(*guerrillaDBAndRedisConfig)
		db, err = g.mysqlConnect()
		if err != nil {
			return err
		}
		queryBatcherId++
		// start the SQL query batcher; data is sent to it via the feeder channel
		stop := make(chan bool)
		feeder := make(feedChan, 1)
		go func(qbID int, stop chan bool) {
			// loop so that if insertQueryBatcher panics, it can recover and start again
			for {
				if feederOK := g.insertQueryBatcher(feeder, db, qbID, stop); !feederOK {
					Log().Debugf("insertQueryBatcher exited (#%d)", qbID)
					return
				}
				Log().Debug("resuming insertQueryBatcher")
			}
		}(queryBatcherId, stop)
		g.batcherStoppers = append(g.batcherStoppers, stop)
		feeders = append(feeders, feeder)
		return nil
	}))

	Svc.AddShutdowner(ShutdownWith(func() error {
		db.Close()
		Log().Infof("closed mysql")
		if redisClient.conn != nil {
			Log().Infof("closed redis")
			redisClient.conn.Close()
		}
		// send a close signal to all query batchers so they exit
		for i := range g.batcherStoppers {
			g.batcherStoppers[i] <- true
		}
		g.batcherWg.Wait()
		return nil
	}))

	var vals []interface{}
	data := newCompressedData()

	return func(p Processor) Processor {
		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
			if task == TaskSaveMail {
				Log().Debug("Got mail from chan,", e.RemoteIP)
				to = trimToLimit(strings.TrimSpace(e.RcptTo[0].User)+"@"+g.config.PrimaryHost, 255)
				e.Helo = trimToLimit(e.Helo, 255)
				e.RcptTo[0].Host = trimToLimit(e.RcptTo[0].Host, 255)
				ts := fmt.Sprintf("%d", time.Now().UnixNano())
				e.ParseHeaders()
				hash := MD5Hex(
					to,
					e.MailFrom.String(),
					e.Subject,
					ts)
				// Add extra headers
				var addHead string
				addHead += "Delivered-To: " + to + "\r\n"
				addHead += "Received: from " + e.Helo + " (" + e.Helo + " [" + e.RemoteIP + "])\r\n"
				addHead += " by " + e.RcptTo[0].Host + " with SMTP id " + hash + "@" + e.RcptTo[0].Host + ";\r\n"
				addHead += " " + time.Now().Format(time.RFC1123Z) + "\r\n"
				// data will be compressed when printed, with addHead added to the beginning
				data.set([]byte(addHead), &e.Data)
				body = "gzencode"
				// data will be written to redis - it implements the Stringer interface;
				// redigo uses fmt to print the data to redis
				redisErr = redisClient.redisConnection(g.config.RedisInterface)
				if redisErr == nil {
					_, doErr := redisClient.conn.Do("SETEX", hash, g.config.RedisExpireSeconds, data)
					if doErr == nil {
						body = "redis" // the backend system will know to look in redis for the message data
						data.clear()   // blank
					}
				} else {
					Log().WithError(redisErr).Warn("Error while connecting to redis")
				}
				vals = []interface{}{} // clear the vals
				vals = append(vals,
					trimToLimit(to, 255),
					trimToLimit(e.MailFrom.String(), 255),
					trimToLimit(e.Subject, 255),
					body,
					data.String(),
					hash,
					trimToLimit(to, 255),
					e.RemoteIP,
					trimToLimit(e.MailFrom.String(), 255),
					e.TLS)
				// hand the values to a random query batcher
				feeders[rand.Intn(len(feeders))] <- vals
				return p.Process(e, task)
			} else {
				return p.Process(e, task)
			}
		})
	}
}