/* ringbuffer.h */
#ifndef RINGBUFFER_H
#define RINGBUFFER_H

#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>
#include <new>
#include <utility>

#include "almalloc.h"
#include "flexarray.h"
/* NOTE: This lockless ringbuffer implementation is copied from JACK, extended
 * to include an element size. Consequently, parameters and return values for a
 * size or count are in 'elements', not bytes. Additionally, it only supports
 * single-consumer/single-provider operation.
 */
struct RingBuffer {
private:
#if defined(__cpp_lib_hardware_interference_size) && !defined(_LIBCPP_VERSION)
    static constexpr std::size_t sCacheAlignment{std::hardware_destructive_interference_size};
#else
    /* Assume a 64-byte cache line, the most common/likely value. */
    static constexpr std::size_t sCacheAlignment{64};
#endif

    /* The counters are free-running: they're never masked on store, and the
     * readable amount is the wrap-safe unsigned difference w - r. Each counter
     * is placed on its own cache line so the single reader thread and single
     * writer thread don't suffer destructive interference (false sharing) on
     * the line holding the other side's counter.
     */
    alignas(sCacheAlignment) std::atomic<std::size_t> mWriteCount{0u};
    alignas(sCacheAlignment) std::atomic<std::size_t> mReadCount{0u};

    /* Maximum number of elements that may be written ahead of the read
     * position (may be less than the allocated capacity; see Create's
     * `limit_writes'). */
    alignas(sCacheAlignment) const std::size_t mWriteSize;
    /* Presumably (power-of-two element capacity) - 1, used to wrap counter
     * values into buffer indices -- confirm against the .cpp. */
    const std::size_t mSizeMask;
    /* Size of one element, in bytes. */
    const std::size_t mElemSize;
    /* Raw byte storage for the elements. */
    al::FlexArray<std::byte, 16> mBuffer;

public:
    /* A contiguous segment of (readable or writable) buffer space: `len'
     * elements starting at `buf'. */
    struct Data {
        std::byte *buf;
        std::size_t len;
    };
    using DataPair = std::pair<Data,Data>;

    /* Constructs a ringbuffer over `numbytes' of element storage. `writesize'
     * is the writable-element limit, `mask' the index mask, and `elemsize' the
     * bytes per element. NOTE(review): instances appear intended to come from
     * Create(), which derives these values consistently.
     */
    RingBuffer(const std::size_t writesize, const std::size_t mask, const std::size_t elemsize,
        const std::size_t numbytes)
        : mWriteSize{writesize}, mSizeMask{mask}, mElemSize{elemsize}, mBuffer{numbytes}
    { }

    /** Reset the read and write pointers to zero. This is not thread safe. */
    auto reset() noexcept -> void;

    /**
     * Return the number of elements available for reading. This is the number
     * of elements in front of the read pointer and behind the write pointer.
     */
    [[nodiscard]] auto readSpace() const noexcept -> std::size_t
    {
        /* Acquire on mWriteCount pairs with the writer's release store, so any
         * data written before the count was published is visible here. */
        const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
        const std::size_t r{mReadCount.load(std::memory_order_acquire)};
        /* mWriteCount is never more than mWriteSize greater than mReadCount,
         * so this unsigned difference is always the valid readable amount. */
        return w - r;
    }

    /**
     * The copying data reader. Copy at most `count' elements into `dest'.
     * Returns the actual number of elements copied.
     */
    [[nodiscard]] auto read(void *dest, std::size_t count) noexcept -> std::size_t;

    /**
     * The copying data reader w/o read pointer advance. Copy at most `count'
     * elements into `dest'. Returns the actual number of elements copied.
     */
    [[nodiscard]] auto peek(void *dest, std::size_t count) const noexcept -> std::size_t;

    /**
     * The non-copying data reader. Returns two ringbuffer data pointers that
     * hold the current readable data. If the readable data is in one segment
     * the second segment has zero length.
     */
    [[nodiscard]] auto getReadVector() noexcept -> DataPair;

    /** Advance the read pointer `count' places. */
    auto readAdvance(std::size_t count) noexcept -> void
    {
        /* Acquire pairs with the writer's release store to mWriteCount.
         * mReadCount may be loaded relaxed: only this (single) reader thread
         * ever modifies it. */
        const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
        const std::size_t r{mReadCount.load(std::memory_order_relaxed)};
        [[maybe_unused]] const std::size_t readable{w - r};
        assert(readable >= count);
        /* Release publishes the freed space to the writer, ensuring the
         * caller's reads of the consumed data complete before the space is
         * handed back. */
        mReadCount.store(r+count, std::memory_order_release);
    }

    /**
     * Return the number of elements available for writing. This is the total
     * number of writable elements excluding what's readable (already written).
     */
    [[nodiscard]] auto writeSpace() const noexcept -> std::size_t
    { return mWriteSize - readSpace(); }

    /**
     * The copying data writer. Copy at most `count' elements from `src'. Returns
     * the actual number of elements copied.
     */
    [[nodiscard]] auto write(const void *src, std::size_t count) noexcept -> std::size_t;

    /**
     * The non-copying data writer. Returns two ringbuffer data pointers that
     * hold the current writeable data. If the writeable data is in one segment
     * the second segment has zero length.
     */
    [[nodiscard]] auto getWriteVector() noexcept -> DataPair;

    /** Advance the write pointer `count' places. */
    auto writeAdvance(std::size_t count) noexcept -> void
    {
        /* mWriteCount may be loaded relaxed: only this (single) writer thread
         * ever modifies it. Acquire on mReadCount pairs with the reader's
         * release store, so freed space is observed before being reused. */
        const std::size_t w{mWriteCount.load(std::memory_order_relaxed)};
        const std::size_t r{mReadCount.load(std::memory_order_acquire)};
        [[maybe_unused]] const std::size_t writable{mWriteSize - (w - r)};
        assert(writable >= count);
        /* Release publishes the newly written data to the reader, ensuring the
         * caller's data writes complete before the count advances. */
        mWriteCount.store(w+count, std::memory_order_release);
    }

    /** Return the size of one element, in bytes. */
    [[nodiscard]] auto getElemSize() const noexcept -> std::size_t { return mElemSize; }

    /**
     * Create a new ringbuffer to hold at least `sz' elements of `elem_sz'
     * bytes. The number of elements is rounded up to a power of two. If
     * `limit_writes' is true, the writable space will be limited to `sz'
     * elements regardless of the rounded size.
     */
    [[nodiscard]] static
    auto Create(std::size_t sz, std::size_t elem_sz, bool limit_writes) -> std::unique_ptr<RingBuffer>;

    /* Class-specific new/delete for the flexible-array storage (mBuffer);
     * defined in almalloc.h -- presumably sizes the allocation to include the
     * trailing array. */
    DEF_FAM_NEWDEL(RingBuffer, mBuffer)
};
/* Owning pointer alias for ringbuffers returned by RingBuffer::Create. */
using RingBufferPtr = std::unique_ptr<RingBuffer>;

#endif /* RINGBUFFER_H */