pool.go

package guerrilla

import (
    "errors"
    "github.com/flashmob/go-guerrilla/log"
    "github.com/flashmob/go-guerrilla/mail"
    "net"
    "sync"
    "sync/atomic"
    "time"
)

var (
    // ErrPoolShuttingDown is returned by Borrow when the pool refuses to lend clients during shutdown
    ErrPoolShuttingDown = errors.New("server pool: shutting down")
)

// Poolable is the interface a struct must satisfy so that it can be managed by the Pool
type Poolable interface {
    // setTimeout sets the read/write timeout on the underlying connection
    setTimeout(t time.Duration) error
    // init binds a new connection and client id to the item
    init(c net.Conn, clientID uint64, ep *mail.Pool)
    // getID returns a unique id
    getID() uint64
    // kill signals the client to terminate
    kill()
}

// Pool holds Clients.
type Pool struct {
    // clients that are ready to be borrowed
    pool chan Poolable
    // semaphore to control the maximum number of borrowed clients
    sem chan bool
    // book-keeping of clients that have been lent out
    activeClients lentClients
    // set to true while the pool is shutting down
    isShuttingDownFlg atomic.Value
    // guards borrowing and shutdown so they do not interleave
    poolGuard sync.Mutex
    // used to unblock Borrow() during shutdown
    ShutdownChan chan int
}

type lentClients struct {
    m  map[uint64]Poolable
    mu sync.Mutex // guards access to this struct
    wg sync.WaitGroup
}

// mapAll runs the callback on every lent client
func (c *lentClients) mapAll(callback func(p Poolable)) {
    c.mu.Lock()
    defer c.mu.Unlock()
    for _, item := range c.m {
        callback(item)
    }
}

// operation performs an operation on a Poolable item using the callback, while holding the lock
func (c *lentClients) operation(callback func(p Poolable), item Poolable) {
    c.mu.Lock()
    defer c.mu.Unlock()
    callback(item)
}

// NewPool creates a new pool of Clients.
func NewPool(poolSize int) *Pool {
    return &Pool{
        pool:          make(chan Poolable, poolSize),
        sem:           make(chan bool, poolSize),
        activeClients: lentClients{m: make(map[uint64]Poolable, poolSize)},
        ShutdownChan:  make(chan int, 1),
    }
}

// Start marks the pool as ready for borrowing
func (p *Pool) Start() {
    // clear the shutting-down flag so that Borrow() will lend clients
    p.isShuttingDownFlg.Store(false)
}

// ShutdownState locks the pool from borrowing, then notifies all active clients:
// each active client's timeout is lowered to 1 sec so that it stops accepting commands
func (p *Pool) ShutdownState() {
    const aVeryLowTimeout = 1 * time.Second
    p.poolGuard.Lock() // ensure no other goroutine is borrowing right now
    defer p.poolGuard.Unlock()
    p.isShuttingDownFlg.Store(true) // no more borrowing
    p.ShutdownChan <- 1             // release anything waiting on p.sem in Borrow()
    // set a low timeout (let the clients finish whatever they're doing)
    p.activeClients.mapAll(func(client Poolable) {
        if err := client.setTimeout(aVeryLowTimeout); err != nil {
            client.kill()
        }
    })
}

// ShutdownWait waits for all lent clients to be returned, then resets the pool
func (p *Pool) ShutdownWait() {
    p.poolGuard.Lock() // ensure no other goroutine is borrowing right now
    defer p.poolGuard.Unlock()
    p.activeClients.wg.Wait() // wait for clients to finish
    if len(p.ShutdownChan) > 0 {
        // drain the signal left over from ShutdownState()
        <-p.ShutdownChan
    }
    p.isShuttingDownFlg.Store(false)
}

// IsShuttingDown returns true if the pool is shutting down
func (p *Pool) IsShuttingDown() bool {
    if value, ok := p.isShuttingDownFlg.Load().(bool); ok {
        return value
    }
    return false
}

// SetTimeout sets the read/write timeout for all lent clients
func (p *Pool) SetTimeout(duration time.Duration) {
    p.activeClients.mapAll(func(client Poolable) {
        if err := client.setTimeout(duration); err != nil {
            client.kill()
        }
    })
}

// GetActiveClientsCount returns the number of active clients that are currently
// out of the pool and busy serving
func (p *Pool) GetActiveClientsCount() int {
    return len(p.sem)
}

// Borrow a Client from the pool. Blocks if the number of clients lent out
// has reached the pool's capacity.
func (p *Pool) Borrow(conn net.Conn, clientID uint64, logger log.Logger, ep *mail.Pool) (Poolable, error) {
    p.poolGuard.Lock()
    defer p.poolGuard.Unlock()
    var c Poolable
    if isShuttingDown, ok := p.isShuttingDownFlg.Load().(bool); ok && isShuttingDown {
        // pool is shutting down.
        return c, ErrPoolShuttingDown
    }
    select {
    case p.sem <- true: // block the client from serving until there is room
        select {
        case c = <-p.pool:
            // reuse a previously returned client
            c.init(conn, clientID, ep)
        default:
            // the pool is empty, create a new client
            c = NewClient(conn, clientID, logger, ep)
        }
        p.activeClientsAdd(c)
    case <-p.ShutdownChan: // unblock p.sem when shutting down
        // pool is shutting down.
        return c, ErrPoolShuttingDown
    }
    return c, nil
}

// Return returns a Client back to the pool.
func (p *Pool) Return(c Poolable) {
    p.activeClientsRemove(c)
    select {
    case p.pool <- c:
    default:
        // the pool is full, discard the client
        // hasta la vista, baby...
    }
    <-p.sem // make room for the next serving client
}

// activeClientsAdd records a client as lent out
func (p *Pool) activeClientsAdd(c Poolable) {
    p.activeClients.operation(func(item Poolable) {
        p.activeClients.wg.Add(1)
        p.activeClients.m[c.getID()] = item
    }, c)
}

// activeClientsRemove removes a client from the lent-out book-keeping
func (p *Pool) activeClientsRemove(c Poolable) {
    p.activeClients.operation(func(item Poolable) {
        delete(p.activeClients.m, item.getID())
        p.activeClients.wg.Done()
    }, c)
}
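
For orientation, below is a minimal sketch of how a server's accept loop might drive the exported API above (NewPool, Start, Borrow, Return, ShutdownState, ShutdownWait). It is not go-guerrilla's actual server code: runServer, serve, the pool size of 100, and the listener/logger/envelope-pool plumbing are hypothetical stand-ins, and the sketch assumes it lives in the same guerrilla package as pool.go.

package guerrilla

import (
    "net"

    "github.com/flashmob/go-guerrilla/log"
    "github.com/flashmob/go-guerrilla/mail"
)

// serve is a hypothetical stand-in for the real per-client handler that would
// speak SMTP over the borrowed client's connection.
func serve(c Poolable) {
    // read commands / write responses until the session ends or the client is killed
}

// runServer is a hypothetical accept loop showing the intended borrow/return cycle.
func runServer(listener net.Listener, logger log.Logger, envelopes *mail.Pool) {
    pool := NewPool(100) // serve at most 100 clients concurrently (assumed size)
    pool.Start()         // clear the shutting-down flag so Borrow() lends clients

    var clientID uint64
    for {
        conn, err := listener.Accept()
        if err != nil {
            break // listener closed
        }
        clientID++
        c, err := pool.Borrow(conn, clientID, logger, envelopes)
        if err != nil {
            // only ErrPoolShuttingDown is returned here; refuse the connection
            _ = conn.Close()
            break
        }
        go func(borrowed Poolable) {
            serve(borrowed)
            pool.Return(borrowed) // hand the client back so it can be reused
        }(c)
    }

    // Shutdown sequence: stop lending and lower timeouts, then wait for
    // the remaining active clients to be returned.
    pool.ShutdownState()
    pool.ShutdownWait()
}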