p_guerrilla_db_redis.go 13 KB

package backends

import (
	"bytes"
	"compress/zlib"
	"database/sql"
	"fmt"
	"io"
	"math/rand"
	"runtime/debug"
	"strings"
	"sync"
	"time"

	"github.com/flashmob/go-guerrilla/mail"
	"github.com/garyburd/redigo/redis"
)
// ----------------------------------------------------------------------------------
// Processor Name: GuerrillaRedisDB
// ----------------------------------------------------------------------------------
// Description   : Saves the message body to Redis and the metadata to SQL. Example only.
//               : Limitation: it doesn't save multiple recipients or validate them.
// ----------------------------------------------------------------------------------
// Config Options: ...
// --------------:-------------------------------------------------------------------
// Input         : envelope
// ----------------------------------------------------------------------------------
// Output        :
// ----------------------------------------------------------------------------------
func init() {
	processors["guerrillaredisdb"] = func() Decorator {
		return GuerrillaDbRedis()
	}
}
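// To use this processor it must be named in the backend configuration. A minimal,
// illustrative sketch only: it assumes the "save_process" key that go-guerrilla's
// backend config normally uses to select save processors, referencing the lowercase
// key registered above:
//
//	"backend_config": {
//	    "save_process": "guerrillaredisdb",
//	    ...
//	}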
var queryBatcherId = 0

// how many rows to batch at a time
const GuerrillaDBAndRedisBatchMax = 50

// tick on this interval when no new rows arrive, so pending rows still get flushed
const GuerrillaDBAndRedisBatchTimeout = time.Second * 3

type GuerrillaDBAndRedisBackend struct {
	config    *guerrillaDBAndRedisConfig
	batcherWg sync.WaitGroup
	// cache prepared queries
	cache           stmtCache
	batcherStoppers []chan bool
}
// statement cache. It's a fixed-size array (not a slice), indexed by (number of rows - 1)
type stmtCache [GuerrillaDBAndRedisBatchMax]*sql.Stmt

type guerrillaDBAndRedisConfig struct {
	NumberOfWorkers    int    `json:"save_workers_size"`
	Table              string `json:"mail_table"`
	Driver             string `json:"sql_driver"`
	DSN                string `json:"sql_dsn"`
	RedisExpireSeconds int    `json:"redis_expire_seconds"`
	RedisInterface     string `json:"redis_interface"`
	PrimaryHost        string `json:"primary_mail_host"`
	BatchTimeout       int    `json:"redis_sql_batch_timeout,omitempty"`
}
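// An illustrative backend_config snippet for the options above. All values are
// hypothetical placeholders; the sql_dsn format depends on the chosen sql_driver:
//
//	{
//	    "save_workers_size": 3,
//	    "mail_table": "new_mail",
//	    "sql_driver": "mysql",
//	    "sql_dsn": "user:pass@tcp(127.0.0.1:3306)/maildb?charset=utf8",
//	    "redis_expire_seconds": 7200,
//	    "redis_interface": "127.0.0.1:6379",
//	    "primary_mail_host": "example.com",
//	    "redis_sql_batch_timeout": 2
//	}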
// loadConfig loads the backend config. It has already been unmarshalled from the
// "backend_config" section of the main config file; here each value is converted
// and copied into a guerrillaDBAndRedisConfig struct.
func (g *GuerrillaDBAndRedisBackend) loadConfig(backendConfig BackendConfig) (err error) {
	configType := BaseConfig(&guerrillaDBAndRedisConfig{})
	bcfg, err := Svc.ExtractConfig(backendConfig, configType)
	if err != nil {
		return err
	}
	m := bcfg.(*guerrillaDBAndRedisConfig)
	g.config = m
	return nil
}

func (g *GuerrillaDBAndRedisBackend) getNumberOfWorkers() int {
	return g.config.NumberOfWorkers
}
type redisClient struct {
	isConnected bool
	conn        redis.Conn
	time        int
}

// compressedData will be compressed using zlib when printed via fmt
type compressedData struct {
	extraHeaders []byte
	data         *bytes.Buffer
	pool         *sync.Pool
}

// newCompressedData returns a new compressedData with its own buffer pool
func newCompressedData() *compressedData {
	var p = sync.Pool{
		New: func() interface{} {
			var b bytes.Buffer
			return &b
		},
	}
	return &compressedData{
		pool: &p,
	}
}
// set stores the extra headers and the buffer of data to compress
func (c *compressedData) set(b []byte, d *bytes.Buffer) {
	c.extraHeaders = b
	c.data = d
}

// String implements the fmt.Stringer interface: it returns the extra headers
// followed by the data, compressed with zlib
func (c *compressedData) String() string {
	if c.data == nil {
		return ""
	}
	// borrow a buffer from the pool
	b := c.pool.Get().(*bytes.Buffer)
	// put it back in the pool when done
	defer func() {
		b.Reset()
		c.pool.Put(b)
	}()
	var r *bytes.Reader
	w, _ := zlib.NewWriterLevel(b, zlib.BestSpeed)
	r = bytes.NewReader(c.extraHeaders)
	io.Copy(w, r)
	io.Copy(w, c.data)
	w.Close()
	return b.String()
}

// clear resets the headers and data, without clearing the pool
func (c *compressedData) clear() {
	c.extraHeaders = []byte{}
	c.data = nil
}
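// A minimal usage sketch of compressedData, mirroring how it is used further below
// (the envelope variable is hypothetical; in this file it is the *mail.Envelope
// handed to the processor):
//
//	data := newCompressedData()
//	data.set([]byte("Delivered-To: someone@example.com\r\n"), &envelope.Data)
//	compressed := data.String() // zlib-compressed extra headers + message body
//	data.clear()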
// prepareInsertQuery prepares the insert query for the given number of rows that
// will be batched with it, caching the prepared statement per row count
func (g *GuerrillaDBAndRedisBackend) prepareInsertQuery(rows int, db *sql.DB) *sql.Stmt {
	if rows == 0 {
		panic("rows argument cannot be 0")
	}
	if g.cache[rows-1] != nil {
		return g.cache[rows-1]
	}
	sqlstr := "INSERT INTO " + g.config.Table + " "
	sqlstr += "(`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`, `hash`, `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)"
	sqlstr += " values "
	values := "(NOW(), ?, ?, ?, ? , 'UTF-8' , ?, 0, ?, '', ?, 0, ?, ?, ?)"
	// add a value tuple for each row
	comma := ""
	for i := 0; i < rows; i++ {
		sqlstr += comma + values
		if comma == "" {
			comma = ","
		}
	}
	stmt, sqlErr := db.Prepare(sqlstr)
	if sqlErr != nil {
		Log().WithError(sqlErr).Fatalf("failed while db.Prepare(INSERT...)")
	}
	// cache it
	g.cache[rows-1] = stmt
	return stmt
}
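// For illustration, prepareInsertQuery(2, db) produces a statement of this shape
// (whitespace aside):
//
//	INSERT INTO <mail_table>
//	(`date`, `to`, `from`, `subject`, `body`, `charset`, `mail`, `spam_score`, `hash`,
//	 `content_type`, `recipient`, `has_attach`, `ip_addr`, `return_path`, `is_tls`)
//	values (NOW(), ?, ?, ?, ?, 'UTF-8', ?, 0, ?, '', ?, 0, ?, ?, ?),
//	       (NOW(), ?, ?, ?, ?, 'UTF-8', ?, 0, ?, '', ?, 0, ?, ?, ?)
//
// i.e. 10 bound parameters per batched row.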
func (g *GuerrillaDBAndRedisBackend) doQuery(c int, db *sql.DB, insertStmt *sql.Stmt, vals *[]interface{}) error {
	var execErr error
	defer func() {
		if r := recover(); r != nil {
			//logln(1, fmt.Sprintf("Recovered in %v", r))
			Log().Error("Recovered from panic:", r, string(debug.Stack()))
			sum := 0
			for _, v := range *vals {
				if str, ok := v.(string); ok {
					sum = sum + len(str)
				}
			}
			Log().Errorf("panic while inserting query [%v] size:%d, err %v", r, sum, execErr)
			panic("query failed")
		}
	}()
	// fetch the (cached) prepared statement matching the number of rows being inserted
	insertStmt = g.prepareInsertQuery(c, db)
	_, execErr = insertStmt.Exec(*vals...)
	//if rand.Intn(2) == 1 {
	//	return errors.New("uggabooka")
	//}
	if execErr != nil {
		Log().WithError(execErr).Error("There was a problem with the insert")
	}
	return execErr
}
// insertQueryBatcher batches the rows from the feeder chan into a single INSERT statement.
// The batched query is executed when:
// - the number of batched rows reaches a threshold, i.e. count >= GuerrillaDBAndRedisBatchMax
// - or, no new rows arrive within a certain time, i.e. the batch times out
// The goroutine can exit either on a panic or when the stop signal arrives and the feeder
// channel is closed. It returns feederOk, which signals whether the feeder channel was still
// open while returning; if feederOk is false, the feeder channel has been closed.
func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(
	feeder feedChan,
	db *sql.DB,
	batcherId int,
	stop chan bool) (feederOk bool) {
	// controls shutdown
	g.batcherWg.Add(1)
	defer g.batcherWg.Done()
	// vals is where values are batched to
	var vals []interface{}
	// how many rows were batched
	count := 0
	// The timer fires after timeo of inactivity,
	// interrupting the select clause when there's no data on the feeder channel
	timeo := GuerrillaDBAndRedisBatchTimeout
	if g.config.BatchTimeout > 0 {
		// assumes redis_sql_batch_timeout is configured in seconds
		timeo = time.Duration(g.config.BatchTimeout) * time.Second
	}
	t := time.NewTimer(timeo)
	// prepare the query used to insert when rows reaches batchMax
	insertStmt := g.prepareInsertQuery(GuerrillaDBAndRedisBatchMax, db)
	// inserter executes a batched insert query, clears the vals and resets the count
	inserter := func(c int) {
		if c > 0 {
			err := g.doQuery(c, db, insertStmt, &vals)
			if err != nil {
				// maybe a connection problem?
				// retry the sql query a few times
				attempts := 3
				for i := 0; i < attempts; i++ {
					Log().Infof("retrying query, rows[%d] ", c)
					time.Sleep(time.Second)
					err = g.doQuery(c, db, insertStmt, &vals)
					if err == nil {
						break
					}
				}
			}
		}
		vals = nil
		count = 0
	}
	rand.Seed(time.Now().UnixNano())
	defer func() {
		if r := recover(); r != nil {
			Log().Error("insertQueryBatcher caught a panic", r, string(debug.Stack()))
		}
	}()
	// Keep getting values from the feeder and adding them to the batch.
	// If the feeder times out, execute the batched query;
	// otherwise, execute it once count reaches the GuerrillaDBAndRedisBatchMax threshold.
	feederOk = true
	for {
		select {
		// Note: sending on the feeder channel panics once it has been closed;
		// feederOk records whether the channel is still open when this function returns.
		case <-stop:
			Log().Infof("MySQL query batcher stopped (#%d)", batcherId)
			// insert any remaining rows
			inserter(count)
			feederOk = false
			close(feeder)
			return
		case row := <-feeder:
			vals = append(vals, row...)
			count++
			Log().Debug("new feeder row:", row, " cols:", len(row), " count:", count, " worker", batcherId)
			if count >= GuerrillaDBAndRedisBatchMax {
				inserter(GuerrillaDBAndRedisBatchMax)
			}
			// stop the timer from firing (reset the interrupt)
			if !t.Stop() {
				// drain the timer
				<-t.C
			}
			t.Reset(timeo)
		case <-t.C:
			// anything to insert?
			if n := len(vals); n > 0 {
				inserter(count)
			}
			t.Reset(timeo)
		}
	}
}
// trimToLimit trims whitespace and truncates the string to at most limit bytes
func trimToLimit(str string, limit int) string {
	ret := strings.TrimSpace(str)
	if len(ret) > limit {
		ret = ret[:limit]
	}
	return ret
}
func (g *GuerrillaDBAndRedisBackend) sqlConnect() (*sql.DB, error) {
	tOut := GuerrillaDBAndRedisBatchTimeout
	if g.config.BatchTimeout > 0 {
		tOut = time.Duration(g.config.BatchTimeout)
	}
	tOut += 10
	// don't go to 30 sec or more
	if tOut >= 30 {
		tOut = 29
	}
	if db, err := sql.Open(g.config.Driver, g.config.DSN); err != nil {
		Log().Error("cannot open database:", err)
		return nil, err
	} else {
		// do we have access?
		rows, err := db.Query("SELECT mail_id FROM " + g.config.Table + " LIMIT 1")
		if err != nil {
			Log().Error("cannot select table:", err)
			return nil, err
		}
		rows.Close()
		return db, nil
	}
}
func (c *redisClient) redisConnection(redisInterface string) (err error) {
	if !c.isConnected {
		c.conn, err = redis.Dial("tcp", redisInterface)
		if err != nil {
			// handle error
			return err
		}
		c.isConnected = true
	}
	return nil
}

type feedChan chan []interface{}
// GuerrillaDbRedis is a specialized processor for Guerrilla mail. It is included
// as an example of a 'monolithic' processor.
func GuerrillaDbRedis() Decorator {
	g := GuerrillaDBAndRedisBackend{}
	redisClient := &redisClient{}
	var db *sql.DB
	var to, body string
	var redisErr error
	var feeders []feedChan
	g.batcherStoppers = make([]chan bool, 0)

	Svc.AddInitializer(InitializeWith(func(backendConfig BackendConfig) error {
		configType := BaseConfig(&guerrillaDBAndRedisConfig{})
		bcfg, err := Svc.ExtractConfig(backendConfig, configType)
		if err != nil {
			return err
		}
		g.config = bcfg.(*guerrillaDBAndRedisConfig)
		db, err = g.sqlConnect()
		if err != nil {
			return err
		}
		queryBatcherId++
		// start the SQL query batcher; data will be sent to it via the feeder channel
		stop := make(chan bool)
		feeder := make(feedChan, 1)
		go func(qbID int, stop chan bool) {
			// loop so that if insertQueryBatcher panics, it can recover and start again
			for {
				if feederOK := g.insertQueryBatcher(feeder, db, qbID, stop); !feederOK {
					Log().Debugf("insertQueryBatcher exited (#%d)", qbID)
					return
				}
				Log().Debug("resuming insertQueryBatcher")
			}
		}(queryBatcherId, stop)
		g.batcherStoppers = append(g.batcherStoppers, stop)
		feeders = append(feeders, feeder)
		return nil
	}))
	Svc.AddShutdowner(ShutdownWith(func() error {
		db.Close()
		Log().Infof("closed sql")
		if redisClient.conn != nil {
			Log().Infof("closed redis")
			redisClient.conn.Close()
		}
		// send a close signal to all query batchers to exit.
		for i := range g.batcherStoppers {
			g.batcherStoppers[i] <- true
		}
		g.batcherWg.Wait()
		return nil
	}))

	var vals []interface{}
	data := newCompressedData()
	return func(p Processor) Processor {
		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
			if task == TaskSaveMail {
				Log().Debug("Got mail from chan,", e.RemoteIP)
				to = trimToLimit(strings.TrimSpace(e.RcptTo[0].User)+"@"+g.config.PrimaryHost, 255)
				e.Helo = trimToLimit(e.Helo, 255)
				e.RcptTo[0].Host = trimToLimit(e.RcptTo[0].Host, 255)
				ts := fmt.Sprintf("%d", time.Now().UnixNano())
				e.ParseHeaders()
				hash := MD5Hex(
					to,
					e.MailFrom.String(),
					e.Subject,
					ts)
				// Add extra headers
				var addHead string
				addHead += "Delivered-To: " + to + "\r\n"
				addHead += "Received: from " + e.Helo + " (" + e.Helo + " [" + e.RemoteIP + "])\r\n"
				addHead += " by " + e.RcptTo[0].Host + " with SMTP id " + hash + "@" + e.RcptTo[0].Host + ";\r\n"
				addHead += " " + time.Now().Format(time.RFC1123Z) + "\r\n"
				// data will be compressed when printed, with addHead added to the beginning
				data.set([]byte(addHead), &e.Data)
				body = "gzencode"
				// data will be written to redis - it implements the fmt.Stringer interface;
				// redigo uses fmt to print the data when sending it to redis.
				redisErr = redisClient.redisConnection(g.config.RedisInterface)
				if redisErr == nil {
					_, doErr := redisClient.conn.Do("SETEX", hash, g.config.RedisExpireSeconds, data)
					if doErr == nil {
						body = "redis" // the backend system will know to look in redis for the message data
						data.clear()   // blank
					}
				} else {
					Log().WithError(redisErr).Warn("Error while connecting to redis")
				}
				vals = []interface{}{} // clear the vals
				vals = append(vals,
					trimToLimit(to, 255),
					trimToLimit(e.MailFrom.String(), 255),
					trimToLimit(e.Subject, 255),
					body,
					data.String(),
					hash,
					trimToLimit(to, 255),
					e.RemoteIP,
					trimToLimit(e.MailFrom.String(), 255),
					e.TLS)
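				// For reference, these ten values bind (in order) to the ten placeholders in
				// prepareInsertQuery: `to`, `from`, `subject`, `body`, `mail`, `hash`,
				// `recipient`, `ip_addr`, `return_path` and `is_tls`; the remaining columns
				// (`date`, `charset`, `spam_score`, `content_type`, `has_attach`) are filled
				// in by the query itself (NOW(), 'UTF-8', 0, '', 0).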
				// give the values to a random query batcher
				feeders[rand.Intn(len(feeders))] <- vals
				return p.Process(e, task)
			} else {
				return p.Process(e, task)
			}
		})
	}
}