  1. /*
  2. * Kamailio TLS module
  3. *
  4. * Copyright (C) 2010 iptelorg GmbH
  5. *
  6. * Permission to use, copy, modify, and distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
/** minimal overhead buffer queue in shm memory.
 * @file modules/tls/sbufq.h
 * @ingroup tls
 * Module: @ref tls
 */
  23. #ifndef __sbufq_h
  24. #define __sbufq_h
  25. #include "../../compiler_opt.h"
  26. #include "../../ut.h"
  27. #include "../../mem/shm_mem.h"
  28. #include "../../timer_ticks.h"
  29. #include "../../timer.h"
  30. #include "../../dprint.h"
  31. #include <string.h>
/** one queue element: list link + inline, variable-size data area. */
struct sbuf_elem {
	struct sbuf_elem* next; /**< next element in the singly-linked queue */
	unsigned int b_size; /**< allocated size of buf[] in bytes */
	char buf[1]; /**< variable size buffer; the real storage (b_size bytes)
	                  is over-allocated past the end of the struct */
};
/** queue head: singly-linked list of sbuf_elem plus bookkeeping.
 * Data is appended at `last` (sbufq_add) and flushed from `first`
 * (sbufq_flush). */
struct sbuffer_queue {
	struct sbuf_elem* first; /**< oldest element (flush side) */
	struct sbuf_elem* last;  /**< newest element (append side) */
	ticks_t last_chg; /**< last change (creation time or partial flush)*/
	unsigned int queued; /**< total unflushed bytes in the whole queue */
	unsigned int offset; /**< offset in the first buffer where unflushed data
							starts */
	unsigned int last_used; /**< how much of the last buffer is used */
};
/* sbufq_flush() output flags */
#define F_BUFQ_EMPTY 1       /**< queue was completely flushed */
#define F_BUFQ_ERROR_FLUSH 2 /**< the flush_f() callback returned error */

/** true if the queue holds no data. */
#define sbufq_empty(bq) ((bq)->first==0)
/** true if the queue holds data. */
#define sbufq_non_empty(bq) ((bq)->first!=0)
  51. /** adds/appends data to a buffer queue.
  52. * WARNING: it does no attempt to synchronize access/lock. If needed it should
  53. * be called under lock.
  54. * @param q - buffer queue
  55. * @param data
  56. * @param size
  57. * @param min_buf_size - min size to allocate for new buffer elements
  58. * @return 0 on success, -1 on error (mem. allocation)
  59. */
  60. inline static int sbufq_add(struct sbuffer_queue* q, const void* data,
  61. unsigned int size, unsigned int min_buf_size)
  62. {
  63. struct sbuf_elem* b;
  64. unsigned int last_free;
  65. unsigned int b_size;
  66. unsigned int crt_size;
  67. get_ticks_raw();
  68. if (likely(q->last==0)) {
  69. b_size=MAX_unsigned(min_buf_size, size);
  70. b=shm_malloc(sizeof(*b)+b_size-sizeof(b->buf));
  71. if (unlikely(b==0))
  72. goto error;
  73. b->b_size=b_size;
  74. b->next=0;
  75. q->last=b;
  76. q->first=b;
  77. q->last_used=0;
  78. q->offset=0;
  79. q->last_chg=get_ticks_raw();
  80. last_free=b_size;
  81. crt_size=size;
  82. goto data_cpy;
  83. }else{
  84. b=q->last;
  85. }
  86. while(size){
  87. last_free=b->b_size-q->last_used;
  88. if (last_free==0){
  89. b_size=MAX_unsigned(min_buf_size, size);
  90. b=shm_malloc(sizeof(*b)+b_size-sizeof(b->buf));
  91. if (unlikely(b==0))
  92. goto error;
  93. b->b_size=b_size;
  94. b->next=0;
  95. q->last->next=b;
  96. q->last=b;
  97. q->last_used=0;
  98. last_free=b->b_size;
  99. }
  100. crt_size=MIN_unsigned(last_free, size);
  101. data_cpy:
  102. memcpy(b->buf+q->last_used, data, crt_size);
  103. q->last_used+=crt_size;
  104. size-=crt_size;
  105. data+=crt_size;
  106. q->queued+=crt_size;
  107. }
  108. return 0;
  109. error:
  110. return -1;
  111. }
  112. /** inserts data (at the beginning) in a buffer queue.
  113. * Note: should never be called after sbufq_run().
  114. * WARNING: it does no attempt to synchronize access/lock. If needed it should
  115. * be called under lock.
  116. * @param q - buffer queue
  117. * @param data
  118. * @param size
  119. * @param min_buf_size - min size to allocate for new buffer elements
  120. * @return 0 on success, -1 on error (mem. allocation)
  121. */
  122. inline static int sbufq_insert(struct sbuffer_queue* q, const void* data,
  123. unsigned int size, unsigned int min_buf_size)
  124. {
  125. struct sbuf_elem* b;
  126. if (likely(q->first==0)) /* if empty, use sbufq_add */
  127. return sbufq_add(q, data, size, min_buf_size);
  128. if (unlikely(q->offset)){
  129. LOG(L_CRIT, "BUG: non-null offset %d (bad call, should"
  130. "never be called after sbufq_run())\n", q->offset);
  131. goto error;
  132. }
  133. if ((q->first==q->last) && ((q->last->b_size-q->last_used)>=size)){
  134. /* one block with enough space in it for size bytes */
  135. memmove(q->first->buf+size, q->first->buf, size);
  136. memcpy(q->first->buf, data, size);
  137. q->last_used+=size;
  138. }else{
  139. /* create a size bytes block directly */
  140. b=shm_malloc(sizeof(*b)+size-sizeof(b->buf));
  141. if (unlikely(b==0))
  142. goto error;
  143. b->b_size=size;
  144. /* insert it */
  145. b->next=q->first;
  146. q->first=b;
  147. memcpy(b->buf, data, size);
  148. }
  149. q->queued+=size;
  150. return 0;
  151. error:
  152. return -1;
  153. }
  154. /** destroy a buffer queue.
  155. * Only the content is destroyed (shm_free()'d), the queue head is
  156. * re-intialized.
  157. * WARNING: it does no attempt to synchronize access/lock. If needed it should
  158. * be called under lock.
  159. * @param q - buffer queue
  160. * @return - number of bytes that used to be queued (>=0).
  161. */
  162. inline static unsigned int sbufq_destroy(struct sbuffer_queue* q)
  163. {
  164. struct sbuf_elem* b;
  165. struct sbuf_elem* next_b;
  166. int unqueued;
  167. unqueued=0;
  168. if (likely(q->first)){
  169. b=q->first;
  170. do{
  171. next_b=b->next;
  172. unqueued+=(b==q->last)?q->last_used:b->b_size;
  173. if (b==q->first)
  174. unqueued-=q->offset;
  175. shm_free(b);
  176. b=next_b;
  177. }while(b);
  178. }
  179. memset(q, 0, sizeof(*q));
  180. return unqueued;
  181. }
  182. /** tries to flush the queue.
  183. * Tries to flush as much as possible from the given queue, using the
  184. * given callback.
  185. * WARNING: it does no attempt to synchronize access/lock. If needed it should
  186. * be called under lock.
  187. * @param q - buffer queue
  188. * @param *flags - set to:
  189. * F_BUFQ_EMPTY if the queued is completely flushed
  190. * F_BUFQ_ERROR_FLUSH if the flush_f callback returned error.
  191. * @param flush_f - flush function (callback). modeled after write():
  192. * flush_f(param1, param2, const void* buf, unsigned size).
  193. * It should return the number of bytes "flushed" on
  194. * success, or <0 on error. If the number of bytes
  195. * "flushed" is smaller then the requested size, it
  196. * would be assumed that no more bytes can be flushed
  197. * and sbufq_flush will exit.
  198. * @param flush_p1 - parameter for the flush function callback.
  199. * @param flush_p2 - parameter for the flush function callback.
  200. * @return -1 on internal error, or the number of bytes flushed on
  201. * success (>=0). Note that the flags param is
  202. * always set and it should be used to check for errors, since
  203. * a flush_f() failure will not result in a negative return.
  204. */
  205. inline static int sbufq_flush(struct sbuffer_queue* q, int* flags,
  206. int (*flush_f)(void* p1, void* p2,
  207. const void* buf,
  208. unsigned size),
  209. void* flush_p1, void* flush_p2)
  210. {
  211. struct sbuf_elem *b;
  212. int n;
  213. int ret;
  214. int block_size;
  215. char* buf;
  216. *flags=0;
  217. ret=0;
  218. while(q->first){
  219. block_size=((q->first==q->last)?q->last_used:q->first->b_size)-
  220. q->offset;
  221. buf=q->first->buf+q->offset;
  222. n=flush_f(flush_p1, flush_p2, buf, block_size);
  223. if (likely(n>0)){
  224. ret+=n;
  225. if (likely(n==block_size)){
  226. b=q->first;
  227. q->first=q->first->next;
  228. shm_free(b);
  229. q->offset=0;
  230. q->queued-=block_size;
  231. }else{
  232. q->offset+=n;
  233. q->queued-=n;
  234. /* no break: if we are here n < block_size => partial write
  235. => the write should be retried */
  236. }
  237. }else{
  238. if (unlikely(n<0))
  239. *flags|=F_BUFQ_ERROR_FLUSH;
  240. break;
  241. }
  242. }
  243. if (likely(q->first==0)){
  244. q->last=0;
  245. q->last_used=0;
  246. q->offset=0;
  247. *flags|=F_BUFQ_EMPTY;
  248. }
  249. return ret;
  250. }
  251. #endif /*__sbufq_h*/
  252. /* vi: set ts=4 sw=4 tw=79:ai:cindent: */