ringbuffer.h 5.0 KB

#ifndef RINGBUFFER_H
#define RINGBUFFER_H

#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>
#include <new>
#include <utility>

#include "almalloc.h"
#include "flexarray.h"
/* NOTE: This lockless ringbuffer implementation is copied from JACK, extended
 * to include an element size. Consequently, parameters and return values for a
 * size or count are in 'elements', not bytes. Additionally, it only supports
 * single-consumer/single-provider operation.
 */
  15. struct RingBuffer {
  16. private:
  17. #if defined(__cpp_lib_hardware_interference_size) && !defined(_LIBCPP_VERSION)
  18. static constexpr std::size_t sCacheAlignment{std::hardware_destructive_interference_size};
  19. #else
  20. /* Assume a 64-byte cache line, the most common/likely value. */
  21. static constexpr std::size_t sCacheAlignment{64};
  22. #endif
  23. alignas(sCacheAlignment) std::atomic<std::size_t> mWriteCount{0u};
  24. alignas(sCacheAlignment) std::atomic<std::size_t> mReadCount{0u};
  25. alignas(sCacheAlignment) const std::size_t mWriteSize;
  26. const std::size_t mSizeMask;
  27. const std::size_t mElemSize;
  28. al::FlexArray<std::byte, 16> mBuffer;
  29. public:
  30. struct Data {
  31. std::byte *buf;
  32. std::size_t len;
  33. };
  34. using DataPair = std::array<Data,2>;
  35. RingBuffer(const std::size_t writesize, const std::size_t mask, const std::size_t elemsize,
  36. const std::size_t numbytes)
  37. : mWriteSize{writesize}, mSizeMask{mask}, mElemSize{elemsize}, mBuffer{numbytes}
  38. { }
  39. /** Reset the read and write pointers to zero. This is not thread safe. */
  40. auto reset() noexcept -> void;
  41. /**
  42. * Return the number of elements available for reading. This is the number
  43. * of elements in front of the read pointer and behind the write pointer.
  44. */
  45. [[nodiscard]] auto readSpace() const noexcept -> std::size_t
  46. {
  47. const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
  48. const std::size_t r{mReadCount.load(std::memory_order_acquire)};
  49. /* mWriteCount is never more than mWriteSize greater than mReadCount. */
  50. return w - r;
  51. }
  52. /**
  53. * The copying data reader. Copy at most `count' elements into `dest'.
  54. * Returns the actual number of elements copied.
  55. */
  56. [[nodiscard]] auto read(void *dest, std::size_t count) noexcept -> std::size_t;
  57. /**
  58. * The copying data reader w/o read pointer advance. Copy at most `count'
  59. * elements into `dest'. Returns the actual number of elements copied.
  60. */
  61. [[nodiscard]] auto peek(void *dest, std::size_t count) const noexcept -> std::size_t;
  62. /**
  63. * The non-copying data reader. Returns two ringbuffer data pointers that
  64. * hold the current readable data. If the readable data is in one segment
  65. * the second segment has zero length.
  66. */
  67. [[nodiscard]] auto getReadVector() noexcept -> DataPair;
  68. /** Advance the read pointer `count' places. */
  69. auto readAdvance(std::size_t count) noexcept -> void
  70. {
  71. const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
  72. const std::size_t r{mReadCount.load(std::memory_order_relaxed)};
  73. [[maybe_unused]] const std::size_t readable{w - r};
  74. assert(readable >= count);
  75. mReadCount.store(r+count, std::memory_order_release);
  76. }
  77. /**
  78. * Return the number of elements available for writing. This is the total
  79. * number of writable elements excluding what's readable (already written).
  80. */
  81. [[nodiscard]] auto writeSpace() const noexcept -> std::size_t
  82. { return mWriteSize - readSpace(); }
  83. /**
  84. * The copying data writer. Copy at most `count' elements from `src'. Returns
  85. * the actual number of elements copied.
  86. */
  87. [[nodiscard]] auto write(const void *src, std::size_t count) noexcept -> std::size_t;
  88. /**
  89. * The non-copying data writer. Returns two ringbuffer data pointers that
  90. * hold the current writeable data. If the writeable data is in one segment
  91. * the second segment has zero length.
  92. */
  93. [[nodiscard]] auto getWriteVector() noexcept -> DataPair;
  94. /** Advance the write pointer `count' places. */
  95. auto writeAdvance(std::size_t count) noexcept -> void
  96. {
  97. const std::size_t w{mWriteCount.load(std::memory_order_relaxed)};
  98. const std::size_t r{mReadCount.load(std::memory_order_acquire)};
  99. [[maybe_unused]] const std::size_t writable{mWriteSize - (w - r)};
  100. assert(writable >= count);
  101. mWriteCount.store(w+count, std::memory_order_release);
  102. }
  103. [[nodiscard]] auto getElemSize() const noexcept -> std::size_t { return mElemSize; }
  104. /**
  105. * Create a new ringbuffer to hold at least `sz' elements of `elem_sz'
  106. * bytes. The number of elements is rounded up to a power of two. If
  107. * `limit_writes' is true, the writable space will be limited to `sz'
  108. * elements regardless of the rounded size.
  109. */
  110. [[nodiscard]] static
  111. auto Create(std::size_t sz, std::size_t elem_sz, bool limit_writes) -> std::unique_ptr<RingBuffer>;
  112. DEF_FAM_NEWDEL(RingBuffer, mBuffer)
  113. };
  114. using RingBufferPtr = std::unique_ptr<RingBuffer>;
  115. #endif /* RINGBUFFER_H */