queue.cpp

#include <atomic> // For the C++11 memory order semantics, which gb.h does not offer (it is a C89 library)

template <typename T>
struct MPMCQueueNode {
	T data;
	std::atomic<isize> idx; // Sequence number: which generation of this ring slot the node is in
};

typedef char CacheLinePad[64];

// Multiple Producer Multiple Consumer Queue
template <typename T>
struct MPMCQueue {
	CacheLinePad pad0;
	isize mask; // buffer.count-1; the capacity is always a power of two
	Array<MPMCQueueNode<T>> buffer;
	gbMutex mutex; // Guards only the grow path in mpmc_enqueue
	std::atomic<isize> count;
	CacheLinePad pad1;
	std::atomic<isize> head_idx; // Next slot producers will claim
	CacheLinePad pad2;
	std::atomic<isize> tail_idx; // Next slot consumers will claim
	CacheLinePad pad3; // Padding keeps the hot indices on separate cache lines to avoid false sharing
};
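
// How the sequence numbers drive the queue (illustrative walkthrough, not part of the
// original file): with capacity 4 (mask = 3), slot s starts with idx == s. An enqueue
// at head_idx h may claim buffer[h & 3] only while its idx == h, and publishes the
// value by storing idx = h+1. A dequeue at tail_idx t waits for idx == t+1 and, once
// it has read the value, stores idx = t+4 so the slot is ready for the producers'
// next wrap-around. A negative difference therefore means the position has been
// lapped: "full" when enqueuing, "empty" when dequeuing.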
template <typename T>
void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size) {
	size = next_pow2_isize(size); // Round up so that `index & mask` can replace a modulo
	GB_ASSERT(gb_is_power_of_two(size));

	gb_mutex_init(&q->mutex);
	q->mask = size-1;
	array_init(&q->buffer, a, size);
	// Every slot starts as "empty, awaiting generation 0": its sequence number is its own index
	for (isize i = 0; i < size; i++) {
		q->buffer[i].idx.store(i, std::memory_order_relaxed);
	}
}

template <typename T>
void mpmc_destroy(MPMCQueue<T> *q) {
	gb_mutex_destroy(&q->mutex);
	gb_free(q->buffer.allocator, q->buffer.data);
}
template <typename T>
bool mpmc_enqueue(MPMCQueue<T> *q, T const &data) {
	isize head_idx = q->head_idx.load(std::memory_order_relaxed);

	for (;;) {
		auto node = &q->buffer.data[head_idx & q->mask];
		isize node_idx = node->idx.load(std::memory_order_acquire);
		isize diff = node_idx - head_idx;

		if (diff == 0) {
			// The slot is free for this generation; try to claim it
			isize next_head_idx = head_idx+1;
			if (q->head_idx.compare_exchange_weak(head_idx, next_head_idx)) {
				node->data = data;
				// Publish: a matching dequeue waits for idx == head_idx+1
				node->idx.store(next_head_idx, std::memory_order_release);
				q->count.fetch_add(1, std::memory_order_release);
				return true;
			}
			// CAS failure reloaded head_idx; retry
		} else if (diff < 0) {
			// The buffer is full; grow it under the mutex
			gb_mutex_lock(&q->mutex);
			isize old_size = q->buffer.count;
			isize new_size = old_size*2;
			array_resize(&q->buffer, new_size);
			if (q->buffer.data == nullptr) {
				GB_PANIC("Unable to resize enqueue: %td -> %td", old_size, new_size);
				gb_mutex_unlock(&q->mutex);
				return false;
			}
			// New slots start empty, awaiting their first generation
			for (isize i = old_size; i < new_size; i++) {
				q->buffer.data[i].idx.store(i, std::memory_order_relaxed);
			}
			q->mask = new_size-1;
			gb_mutex_unlock(&q->mutex);
		} else {
			// Another producer advanced past this position; reload and retry
			head_idx = q->head_idx.load(std::memory_order_relaxed);
		}
	}
}
template <typename T>
bool mpmc_dequeue(MPMCQueue<T> *q, T *data_) {
	isize tail_idx = q->tail_idx.load(std::memory_order_relaxed);

	for (;;) {
		auto node = &q->buffer.data[tail_idx & q->mask];
		isize node_idx = node->idx.load(std::memory_order_acquire);
		isize diff = node_idx - (tail_idx+1);

		if (diff == 0) {
			// The slot holds a published value for this generation; try to claim it
			isize next_tail_idx = tail_idx+1;
			if (q->tail_idx.compare_exchange_weak(tail_idx, next_tail_idx)) {
				if (data_) *data_ = node->data;
				// Mark the slot free for the producers' next wrap-around
				node->idx.store(tail_idx + q->mask + 1, std::memory_order_release);
				q->count.fetch_sub(1, std::memory_order_release);
				return true;
			}
			// CAS failure reloaded tail_idx; retry
		} else if (diff < 0) {
			// The queue is empty
			return false;
		} else {
			// Another consumer advanced past this position; reload and retry
			tail_idx = q->tail_idx.load(std::memory_order_relaxed);
		}
	}
}
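
// A minimal usage sketch (illustrative, not part of the original file): two producers
// and two consumers sharing one queue. It assumes gb.h's gb_heap_allocator() and
// C++11 <thread>; the surrounding codebase may well drive the queue differently.
#include <thread>

void mpmc_example(void) {
	MPMCQueue<isize> q = {}; // Zero-init matters: mpmc_init does not reset head_idx/tail_idx/count
	mpmc_init(&q, gb_heap_allocator(), 4096); // Sized to hold everything in flight, so the grow path is never taken here

	std::thread producers[2];
	std::thread consumers[2];
	for (auto &t : producers) {
		t = std::thread([&q] {
			for (isize i = 0; i < 1000; i++) {
				mpmc_enqueue(&q, i);
			}
		});
	}
	for (auto &t : consumers) {
		t = std::thread([&q] {
			isize value = 0;
			isize got = 0;
			while (got < 1000) { // 2 consumers x 1000 == 2 producers x 1000
				if (mpmc_dequeue(&q, &value)) {
					got += 1;
				}
			}
		});
	}
	for (auto &t : producers) t.join();
	for (auto &t : consumers) t.join();
	GB_ASSERT(q.count.load(std::memory_order_relaxed) == 0);

	mpmc_destroy(&q);
}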