// p_guerrilla_db_redis.go
package backends

import (
	"bytes"
	"compress/zlib"
	"database/sql"
	"fmt"
	"io"
	"math/rand"
	"runtime/debug"
	"strings"
	"sync"
	"time"

	"github.com/flashmob/go-guerrilla/mail"
)
// ----------------------------------------------------------------------------------
// Processor Name: GuerrillaRedisDB
// ----------------------------------------------------------------------------------
// Description   : Saves the body to redis, meta data to SQL. Example only.
//               : Limitation: it doesn't save multiple recipients or validate them
// ----------------------------------------------------------------------------------
// Config Options: ...
// --------------:-------------------------------------------------------------------
// Input         : envelope
// ----------------------------------------------------------------------------------
// Output        :
// ----------------------------------------------------------------------------------
// init registers this processor under the "guerrillaredisdb" name so it can
// be selected in the backend's process-stack configuration.
func init() {
	processors["guerrillaredisdb"] = func() Decorator {
		return GuerrillaDbRedis()
	}
}
// queryBatcherId is a monotonically increasing id given to each query batcher
// goroutine; used only for identification in log messages.
var queryBatcherId = 0

// how many rows to batch at a time in a single multi-row INSERT
const GuerrillaDBAndRedisBatchMax = 50

// tick on every... (default flush interval for a partially filled batch;
// can be overridden via the redis_sql_batch_timeout config option)
const GuerrillaDBAndRedisBatchTimeout = time.Second * 3
// GuerrillaDBAndRedisBackend stores message bodies in redis and meta data in
// an SQL table, batching SQL inserts for throughput.
type GuerrillaDBAndRedisBackend struct {
	config *guerrillaDBAndRedisConfig
	// batcherWg waits for all insertQueryBatcher goroutines during shutdown
	batcherWg sync.WaitGroup
	// cache prepared queries
	cache stmtCache
	// one stop channel per running batcher; signalled on shutdown
	batcherStoppers []chan bool
}

// statement cache. It's an array, not slice: index i holds the prepared
// INSERT statement covering i+1 batched rows.
type stmtCache [GuerrillaDBAndRedisBatchMax]*sql.Stmt
// guerrillaDBAndRedisConfig is this processor's section of the backend config.
type guerrillaDBAndRedisConfig struct {
	// SQL table that rows are inserted into
	Table string `json:"mail_table"`
	// database/sql driver name
	Driver string `json:"sql_driver"`
	// data source name passed to sql.Open
	DSN string `json:"sql_dsn"`
	// TTL applied to message bodies stored in redis (SETEX)
	RedisExpireSeconds int `json:"redis_expire_seconds"`
	// host:port that the redis client dials
	RedisInterface string `json:"redis_interface"`
	// appended to the recipient's user part to form the `to` column
	PrimaryHost string `json:"primary_mail_host"`
	// batch flush timeout (see insertQueryBatcher); 0 means use the default
	BatchTimeout int `json:"redis_sql_batch_timeout,omitempty"`
}
  55. // Load the backend config for the backend. It has already been unmarshalled
  56. // from the main config file 'backend' config "backend_config"
  57. // Now we need to convert each type and copy into the guerrillaDBAndRedisConfig struct
  58. func (g *GuerrillaDBAndRedisBackend) loadConfig(backendConfig BackendConfig) (err error) {
  59. configType := BaseConfig(&guerrillaDBAndRedisConfig{})
  60. bcfg, err := Svc.ExtractConfig(ConfigProcessors, "guerrillaredisdb", backendConfig, configType)
  61. if err != nil {
  62. return err
  63. }
  64. m := bcfg.(*guerrillaDBAndRedisConfig)
  65. g.config = m
  66. return nil
  67. }
// redisClient wraps a single redis connection and remembers whether it has
// been established, so redisConnection can be called repeatedly.
type redisClient struct {
	isConnected bool
	conn        RedisConn
	// NOTE(review): `time` appears unused within this file — confirm before removing
	time int
}
// compressedData struct will be compressed using zlib when printed via fmt
// (it implements fmt.Stringer). extraHeaders is emitted ahead of data so the
// stored message carries the extra delivery headers.
type compressedData struct {
	extraHeaders []byte
	data         *bytes.Buffer
	// pool of *bytes.Buffer reused as compression scratch space
	pool *sync.Pool
}
  79. // newCompressedData returns a new CompressedData
  80. func newCompressedData() *compressedData {
  81. var p = sync.Pool{
  82. New: func() interface{} {
  83. var b bytes.Buffer
  84. return &b
  85. },
  86. }
  87. return &compressedData{
  88. pool: &p,
  89. }
  90. }
// Set the extraheaders and buffer of data to compress. Neither argument is
// copied; the caller keeps ownership until String() is invoked.
func (c *compressedData) set(b []byte, d *bytes.Buffer) {
	c.extraHeaders = b
	c.data = d
}
  96. // implement Stringer interface
  97. func (c *compressedData) String() string {
  98. if c.data == nil {
  99. return ""
  100. }
  101. //borrow a buffer form the pool
  102. b := c.pool.Get().(*bytes.Buffer)
  103. // put back in the pool
  104. defer func() {
  105. b.Reset()
  106. c.pool.Put(b)
  107. }()
  108. var r *bytes.Reader
  109. w, _ := zlib.NewWriterLevel(b, zlib.BestSpeed)
  110. r = bytes.NewReader(c.extraHeaders)
  111. _, _ = io.Copy(w, r)
  112. _, _ = io.Copy(w, c.data)
  113. _ = w.Close()
  114. return b.String()
  115. }
// clear it, without clearing the pool: drops the references to the current
// message so the envelope's data buffer can be reclaimed.
func (c *compressedData) clear() {
	c.extraHeaders = []byte{}
	c.data = nil
}
  121. // prepares the sql query with the number of rows that can be batched with it
  122. func (g *GuerrillaDBAndRedisBackend) prepareInsertQuery(rows int, db *sql.DB) *sql.Stmt {
  123. if rows == 0 {
  124. panic("rows argument cannot be 0")
  125. }
  126. if g.cache[rows-1] != nil {
  127. return g.cache[rows-1]
  128. }
  129. sqlstr := "INSERT INTO " + g.config.Table + "" +
  130. "(" +
  131. "`date`, " +
  132. "`to`, " +
  133. "`from`, " +
  134. "`subject`, " +
  135. "`body`, " +
  136. "`charset`, " +
  137. "`mail`, " +
  138. "`spam_score`, " +
  139. "`hash`, " +
  140. "`content_type`, " +
  141. "`recipient`, " +
  142. "`has_attach`, " +
  143. "`ip_addr`, " +
  144. "`return_path`, " +
  145. "`is_tls`" +
  146. ")" +
  147. " values "
  148. values := "(NOW(), ?, ?, ?, ? , 'UTF-8' , ?, 0, ?, '', ?, 0, ?, ?, ?)"
  149. // add more rows
  150. comma := ""
  151. for i := 0; i < rows; i++ {
  152. sqlstr += comma + values
  153. if comma == "" {
  154. comma = ","
  155. }
  156. }
  157. stmt, sqlErr := db.Prepare(sqlstr)
  158. if sqlErr != nil {
  159. Log().WithError(sqlErr).Fatalf("failed while db.Prepare(INSERT...)")
  160. }
  161. // cache it
  162. g.cache[rows-1] = stmt
  163. return stmt
  164. }
  165. func (g *GuerrillaDBAndRedisBackend) doQuery(c int, db *sql.DB, insertStmt *sql.Stmt, vals *[]interface{}) error {
  166. var execErr error
  167. defer func() {
  168. if r := recover(); r != nil {
  169. sum := 0
  170. for _, v := range *vals {
  171. if str, ok := v.(string); ok {
  172. sum = sum + len(str)
  173. }
  174. }
  175. Log().Fields("panic", fmt.Sprintf("%v", r),
  176. "size", sum,
  177. "error", execErr,
  178. "stack", string(debug.Stack())).
  179. Error("panic while inserting query")
  180. panic("query failed")
  181. }
  182. }()
  183. // prepare the query used to insert when rows reaches batchMax
  184. insertStmt = g.prepareInsertQuery(c, db)
  185. _, execErr = insertStmt.Exec(*vals...)
  186. if execErr != nil {
  187. Log().WithError(execErr).Error("There was a problem the insert")
  188. }
  189. return execErr
  190. }
  191. // Batches the rows from the feeder chan in to a single INSERT statement.
  192. // Execute the batches query when:
  193. // - number of batched rows reaches a threshold, i.e. count n = threshold
  194. // - or, no new rows within a certain time, i.e. times out
  195. // The goroutine can either exit if there's a panic or feeder channel closes
  196. // it returns feederOk which signals if the feeder chanel was ok (still open) while returning
  197. // if it feederOk is false, then it means the feeder chanel is closed
  198. func (g *GuerrillaDBAndRedisBackend) insertQueryBatcher(
  199. feeder feedChan,
  200. db *sql.DB,
  201. batcherId int,
  202. stop chan bool) (feederOk bool) {
  203. // controls shutdown
  204. defer g.batcherWg.Done()
  205. g.batcherWg.Add(1)
  206. // vals is where values are batched to
  207. var vals []interface{}
  208. // how many rows were batched
  209. count := 0
  210. // The timer will tick x seconds.
  211. // Interrupting the select clause when there's no data on the feeder channel
  212. timeo := GuerrillaDBAndRedisBatchTimeout
  213. if g.config.BatchTimeout > 0 {
  214. timeo = time.Duration(g.config.BatchTimeout)
  215. }
  216. t := time.NewTimer(timeo)
  217. // prepare the query used to insert when rows reaches batchMax
  218. insertStmt := g.prepareInsertQuery(GuerrillaDBAndRedisBatchMax, db)
  219. // inserts executes a batched insert query, clears the vals and resets the count
  220. inserter := func(c int) {
  221. if c > 0 {
  222. err := g.doQuery(c, db, insertStmt, &vals)
  223. if err != nil {
  224. // maybe connection prob?
  225. // retry the sql query
  226. attempts := 3
  227. for i := 0; i < attempts; i++ {
  228. Log().Fields("rows", c).Info("retrying query query rows ")
  229. time.Sleep(time.Second)
  230. err = g.doQuery(c, db, insertStmt, &vals)
  231. if err == nil {
  232. continue
  233. }
  234. }
  235. }
  236. }
  237. vals = nil
  238. count = 0
  239. }
  240. rand.Seed(time.Now().UnixNano())
  241. defer func() {
  242. if r := recover(); r != nil {
  243. Log().Error("insertQueryBatcher caught a panic", r, string(debug.Stack()))
  244. }
  245. }()
  246. // Keep getting values from feeder and add to batch.
  247. // if feeder times out, execute the batched query
  248. // otherwise, execute the batched query once it reaches the GuerrillaDBAndRedisBatchMax threshold
  249. feederOk = true
  250. for {
  251. select {
  252. // it may panic when reading on a closed feeder channel. feederOK detects if it was closed
  253. case <-stop:
  254. Log().Fields("batcherID", batcherId).Info("MySQL query batcher stopped")
  255. // Insert any remaining rows
  256. inserter(count)
  257. feederOk = false
  258. close(feeder)
  259. return
  260. case row := <-feeder:
  261. vals = append(vals, row...)
  262. count++
  263. Log().Fields(
  264. "row", row,
  265. "cols", len(row),
  266. "count", count,
  267. "worker", batcherId,
  268. ).Debug("new feeder row")
  269. if count >= GuerrillaDBAndRedisBatchMax {
  270. inserter(GuerrillaDBAndRedisBatchMax)
  271. }
  272. // stop timer from firing (reset the interrupt)
  273. if !t.Stop() {
  274. // darin the timer
  275. <-t.C
  276. }
  277. t.Reset(timeo)
  278. case <-t.C:
  279. // anything to insert?
  280. if n := len(vals); n > 0 {
  281. inserter(count)
  282. }
  283. t.Reset(timeo)
  284. }
  285. }
  286. }
  287. func trimToLimit(str string, limit int) string {
  288. ret := strings.TrimSpace(str)
  289. if len(str) > limit {
  290. ret = str[:limit]
  291. }
  292. return ret
  293. }
  294. func (g *GuerrillaDBAndRedisBackend) sqlConnect() (*sql.DB, error) {
  295. if db, err := sql.Open(g.config.Driver, g.config.DSN); err != nil {
  296. Log().Error("cannot open database", err, "]")
  297. return nil, err
  298. } else {
  299. // do we have access?
  300. _, err = db.Query("SELECT mail_id FROM " + g.config.Table + " LIMIT 1")
  301. if err != nil {
  302. Log().Error("cannot select table:", err)
  303. return nil, err
  304. }
  305. return db, nil
  306. }
  307. }
  308. func (c *redisClient) redisConnection(redisInterface string) (err error) {
  309. if c.isConnected == false {
  310. c.conn, err = RedisDialer("tcp", redisInterface)
  311. if err != nil {
  312. // handle error
  313. return err
  314. }
  315. c.isConnected = true
  316. }
  317. return nil
  318. }
  319. type feedChan chan []interface{}
// GuerrillaDbRedis is a specialized processor for Guerrilla mail. It is here as an example.
// It's an example of a 'monolithic' processor.
//
// On TaskSaveMail it compresses the message (generated delivery headers plus
// body), tries to store it in redis keyed by an MD5 hash, and sends the meta
// data to a randomly chosen SQL query batcher via a feeder channel. When the
// redis store fails, the compressed body is placed in the SQL row instead.
func GuerrillaDbRedis() Decorator {
	g := GuerrillaDBAndRedisBackend{}
	redisClient := &redisClient{}
	// state shared by the initializer, shutdowner and processor closures below.
	// NOTE(review): `to`, `body`, `vals` and `data` are reused across calls to
	// the processor func — this assumes calls are not concurrent; confirm the
	// worker model before sharing one decorator across goroutines.
	var (
		db       *sql.DB
		to, body string
		redisErr error
		feeders  []feedChan
	)
	g.batcherStoppers = make([]chan bool, 0)
	// initializer: extract config, connect to SQL, then launch a query batcher
	// goroutine fed by a fresh feeder channel (re-entered if it panics)
	Svc.AddInitializer(InitializeWith(func(backendConfig BackendConfig) error {
		configType := BaseConfig(&guerrillaDBAndRedisConfig{})
		bcfg, err := Svc.ExtractConfig(
			ConfigProcessors, "guerrillaredisdb", backendConfig, configType)
		if err != nil {
			return err
		}
		g.config = bcfg.(*guerrillaDBAndRedisConfig)
		db, err = g.sqlConnect()
		if err != nil {
			return err
		}
		queryBatcherId++
		// start the query SQL batching where we will send data via the feeder channel
		stop := make(chan bool)
		feeder := make(feedChan, 1)
		go func(qbID int, stop chan bool) {
			// we loop so that if insertQueryBatcher panics, it can recover and go in again
			for {
				if feederOK := g.insertQueryBatcher(feeder, db, qbID, stop); !feederOK {
					Log().Fields("qbID", qbID).Debug("insertQueryBatcher exited")
					return
				}
				Log().Debug("resuming insertQueryBatcher")
			}
		}(queryBatcherId, stop)
		g.batcherStoppers = append(g.batcherStoppers, stop)
		feeders = append(feeders, feeder)
		return nil
	}))
	// shutdowner: close SQL and redis, then signal every batcher to flush its
	// remaining rows and exit, and wait for them all
	Svc.AddShutdowner(ShutdownWith(func() error {
		if db != nil {
			if err := db.Close(); err != nil {
				Log().WithError(err).Error("close sql database")
			} else {
				Log().Info("closed sql database")
			}
		}
		if redisClient.conn != nil {
			if err := redisClient.conn.Close(); err != nil {
				Log().WithError(err).Error("close redis failed")
			} else {
				Log().Info("closed redis")
			}
		}
		// send a close signal to all query batchers to exit.
		for i := range g.batcherStoppers {
			g.batcherStoppers[i] <- true
		}
		g.batcherWg.Wait()
		return nil
	}))
	var vals []interface{}
	data := newCompressedData()
	return func(p Processor) Processor {
		return ProcessWith(func(e *mail.Envelope, task SelectTask) (Result, error) {
			if task == TaskSaveMail {
				Log().Debug("Got mail from chan,", e.RemoteIP)
				// NOTE(review): e.RcptTo[0] assumes at least one recipient is
				// present — confirm upstream validation guarantees this
				to = trimToLimit(strings.TrimSpace(e.RcptTo[0].User)+"@"+g.config.PrimaryHost, 255)
				e.Helo = trimToLimit(e.Helo, 255)
				e.RcptTo[0].Host = trimToLimit(e.RcptTo[0].Host, 255)
				ts := fmt.Sprintf("%d", time.Now().UnixNano())
				if err := e.ParseHeaders(); err != nil {
					Log().WithError(err).Error("failed to parse headers")
				}
				// hash keys the body in redis and fills the `hash` column
				hash := MD5Hex(
					to,
					e.MailFrom.String(),
					e.Subject,
					ts)
				e.QueuedId.FromHex(hash)
				// Add extra headers
				var addHead string
				addHead += "Delivered-To: " + to + "\r\n"
				addHead += "Received: from " + e.RemoteIP + " ([" + e.RemoteIP + "])\r\n"
				addHead += " by " + e.RcptTo[0].Host + " with " + e.Protocol().String() + " id " + hash + "@" + e.RcptTo[0].Host + ";\r\n"
				addHead += " " + time.Now().Format(time.RFC1123Z) + "\r\n"
				// data will be compressed when printed, with addHead added to beginning
				data.set([]byte(addHead), &e.Data)
				body = "gzencode"
				// data will be written to redis - it implements the Stringer interface, redigo uses fmt to
				// print the data to redis.
				redisErr = redisClient.redisConnection(g.config.RedisInterface)
				if redisErr == nil {
					_, doErr := redisClient.conn.Do("SETEX", hash, g.config.RedisExpireSeconds, data)
					if doErr == nil {
						body = "redis" // the backend system will know to look in redis for the message data
						data.clear()   // blank, so the SQL `mail` column below stays empty
					}
				} else {
					Log().WithError(redisErr).Warn("Error while connecting redis")
				}
				vals = []interface{}{} // clear the vals
				// column order matches prepareInsertQuery's placeholders:
				// to, from, subject, body, mail, hash, recipient, ip_addr, return_path, is_tls
				vals = append(vals,
					trimToLimit(to, 255),
					trimToLimit(e.MailFrom.String(), 255),
					trimToLimit(e.Subject, 255),
					body,
					data.String(),
					hash,
					trimToLimit(to, 255),
					e.RemoteIP,
					trimToLimit(e.MailFrom.String(), 255),
					e.TLS)
				// give the values to a random query batcher
				feeders[rand.Intn(len(feeders))] <- vals
				return p.Process(e, task)
			} else {
				return p.Process(e, task)
			}
		})
	}
}