  1. /**************************************************************************/
  2. /* test_rid.h */
  3. /**************************************************************************/
  4. /* This file is part of: */
  5. /* GODOT ENGINE */
  6. /* https://godotengine.org */
  7. /**************************************************************************/
  8. /* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
  9. /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
  10. /* */
  11. /* Permission is hereby granted, free of charge, to any person obtaining */
  12. /* a copy of this software and associated documentation files (the */
  13. /* "Software"), to deal in the Software without restriction, including */
  14. /* without limitation the rights to use, copy, modify, merge, publish, */
  15. /* distribute, sublicense, and/or sell copies of the Software, and to */
  16. /* permit persons to whom the Software is furnished to do so, subject to */
  17. /* the following conditions: */
  18. /* */
  19. /* The above copyright notice and this permission notice shall be */
  20. /* included in all copies or substantial portions of the Software. */
  21. /* */
  22. /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
  23. /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
  24. /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
  25. /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
  26. /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
  27. /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
  28. /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
  29. /**************************************************************************/
  30. #ifndef TEST_RID_H
  31. #define TEST_RID_H
#include "core/os/thread.h"
#include "core/templates/local_vector.h"
#include "core/templates/rid.h"
#include "core/templates/rid_owner.h"

#include "tests/test_macros.h"

#include <atomic>
#include <cstring>
#include <thread>
  37. #ifdef SANITIZERS_ENABLED
  38. #ifdef __has_feature
  39. #if __has_feature(thread_sanitizer)
  40. #define TSAN_ENABLED
  41. #endif
  42. #elif defined(__SANITIZE_THREAD__)
  43. #define TSAN_ENABLED
  44. #endif
  45. #endif
  46. #ifdef TSAN_ENABLED
  47. #include <sanitizer/tsan_interface.h>
  48. #endif
  49. namespace TestRID {
  50. TEST_CASE("[RID] Default Constructor") {
  51. RID rid;
  52. CHECK(rid.get_id() == 0);
  53. }
  54. TEST_CASE("[RID] Factory method") {
  55. RID rid = RID::from_uint64(1);
  56. CHECK(rid.get_id() == 1);
  57. }
  58. TEST_CASE("[RID] Operators") {
  59. RID rid = RID::from_uint64(1);
  60. RID rid_zero = RID::from_uint64(0);
  61. RID rid_one = RID::from_uint64(1);
  62. RID rid_two = RID::from_uint64(2);
  63. CHECK_FALSE(rid == rid_zero);
  64. CHECK(rid == rid_one);
  65. CHECK_FALSE(rid == rid_two);
  66. CHECK_FALSE(rid < rid_zero);
  67. CHECK_FALSE(rid < rid_one);
  68. CHECK(rid < rid_two);
  69. CHECK_FALSE(rid <= rid_zero);
  70. CHECK(rid <= rid_one);
  71. CHECK(rid <= rid_two);
  72. CHECK(rid > rid_zero);
  73. CHECK_FALSE(rid > rid_one);
  74. CHECK_FALSE(rid > rid_two);
  75. CHECK(rid >= rid_zero);
  76. CHECK(rid >= rid_one);
  77. CHECK_FALSE(rid >= rid_two);
  78. CHECK(rid != rid_zero);
  79. CHECK_FALSE(rid != rid_one);
  80. CHECK(rid != rid_two);
  81. }
  82. TEST_CASE("[RID] 'is_valid' & 'is_null'") {
  83. RID rid_zero = RID::from_uint64(0);
  84. RID rid_one = RID::from_uint64(1);
  85. CHECK_FALSE(rid_zero.is_valid());
  86. CHECK(rid_zero.is_null());
  87. CHECK(rid_one.is_valid());
  88. CHECK_FALSE(rid_one.is_null());
  89. }
  90. TEST_CASE("[RID] 'get_local_index'") {
  91. CHECK(RID::from_uint64(1).get_local_index() == 1);
  92. CHECK(RID::from_uint64(4'294'967'295).get_local_index() == 4'294'967'295);
  93. CHECK(RID::from_uint64(4'294'967'297).get_local_index() == 1);
  94. }
// This case would let sanitizers realize data races.
// Additionally, on purely weakly ordered architectures, it would detect synchronization issues
// if RID_Alloc failed to impose proper memory ordering and the test's threads are distributed
// among multiple L1 caches.
TEST_CASE("[RID_Owner] Thread safety") {
	// Payload sized to a full cache line so every byte of an item can be verified.
	struct DataHolder {
		char data[Thread::CACHE_LINE_BYTES];
	};

	struct RID_OwnerTester {
		uint32_t thread_count = 0;
		RID_Owner<DataHolder, true> rid_owner; // Thread-safe RID_Owner variant under test.
		TightLocalVector<Thread> threads;
		SafeNumeric<uint32_t> next_thread_idx; // Hands each worker a unique zero-based index.
		// Using std::atomic directly since SafeNumeric doesn't support relaxed ordering.
		TightLocalVector<std::atomic<uint64_t>> rids; // One published RID id per worker thread.
		std::atomic<uint32_t> sync[2] = {}; // Double-buffered barrier counters; see lockstep().
		std::atomic<uint32_t> correct = 0; // Total successful cross-thread verifications.

		// A barrier that doesn't introduce memory ordering constraints, only compiler ones.
		// The idea is not to cause any sync traffic that would make the code we want to test
		// seem correct as a side effect.
		// Consecutive steps alternate between the two counters in sync[] (p_step % 2), and the
		// target grows by threads.size() every second step, so a barrier can't be satisfied
		// by arrivals belonging to a different step.
		void lockstep(uint32_t p_step) {
			uint32_t buf_idx = p_step % 2;
			uint32_t target = (p_step / 2 + 1) * threads.size();
			sync[buf_idx].fetch_add(1, std::memory_order_relaxed);
			do {
				std::this_thread::yield();
			} while (sync[buf_idx].load(std::memory_order_relaxed) != target);
		}

		// p_chunk_for_all: if true, the owner's chunk is sized to fit one item per thread;
		// otherwise each chunk holds a single item.
		// p_chunks_preallocated: if true, make and immediately free RIDs up front so chunk
		// allocation happens before the threaded part of the test.
		explicit RID_OwnerTester(bool p_chunk_for_all, bool p_chunks_preallocated) :
				thread_count(OS::get_singleton()->get_processor_count()),
				rid_owner(sizeof(DataHolder) * (p_chunk_for_all ? thread_count : 1)) {
			threads.resize(thread_count);
			rids.resize(threads.size());
			if (p_chunks_preallocated) {
				LocalVector<RID> prealloc_rids;
				for (uint32_t i = 0; i < (p_chunk_for_all ? 1 : threads.size()); i++) {
					prealloc_rids.push_back(rid_owner.make_rid());
				}
				for (uint32_t i = 0; i < prealloc_rids.size(); i++) {
					rid_owner.free(prealloc_rids[i]);
				}
			}
		}

		~RID_OwnerTester() {
			// Release every RID the worker threads created during test().
			for (uint32_t i = 0; i < threads.size(); i++) {
				rid_owner.free(RID::from_uint64(rids[i].load(std::memory_order_relaxed)));
			}
		}

		// Spawns one worker per logical CPU. Each worker creates a RID holding a byte
		// pattern unique to its index, publishes the id, then reads back every other
		// worker's item and checks its contents. The main thread finally asserts that
		// all N * (N - 1) cross-verifications succeeded.
		void test() {
			for (uint32_t i = 0; i < threads.size(); i++) {
				threads[i].start(
						[](void *p_data) {
							RID_OwnerTester *rot = (RID_OwnerTester *)p_data;

							// Maps a thread index to a fill byte unique per index (mod 256 patterns).
							auto _compute_thread_unique_byte = [](uint32_t p_idx) -> char {
								return ((p_idx & 0xff) ^ (0b11111110 << (p_idx % 8)));
							};

							// 1. Each thread gets a zero-based index.
							uint32_t self_th_idx = rot->next_thread_idx.postincrement();
							rot->lockstep(0);

							// 2. Each thread makes a RID holding unique data.
							DataHolder initial_data;
							memset(&initial_data, _compute_thread_unique_byte(self_th_idx), Thread::CACHE_LINE_BYTES);
							RID my_rid = rot->rid_owner.make_rid(initial_data);
							rot->rids[self_th_idx].store(my_rid.get_id(), std::memory_order_relaxed);
							rot->lockstep(1);

							// 3. Each thread verifies all the others.
							uint32_t local_correct = 0;
							for (uint32_t th_idx = 0; th_idx < rot->threads.size(); th_idx++) {
								if (th_idx == self_th_idx) {
									continue;
								}
								char expected_unique_byte = _compute_thread_unique_byte(th_idx);
								RID rid = RID::from_uint64(rot->rids[th_idx].load(std::memory_order_relaxed));
								DataHolder *data = rot->rid_owner.get_or_null(rid);
#ifdef TSAN_ENABLED
								__tsan_acquire(data); // We know not a race in practice.
#endif
								// Every byte of the other thread's item must carry its unique pattern.
								bool ok = true;
								for (uint32_t j = 0; j < Thread::CACHE_LINE_BYTES; j++) {
									if (data->data[j] != expected_unique_byte) {
										ok = false;
										break;
									}
								}
								if (ok) {
									local_correct++;
								}
#ifdef TSAN_ENABLED
								__tsan_release(data);
#endif
							}
							rot->lockstep(2);

							rot->correct.fetch_add(local_correct, std::memory_order_acq_rel);
						},
						this);
			}
			for (uint32_t i = 0; i < threads.size(); i++) {
				threads[i].wait_to_finish();
			}
			CHECK_EQ(correct.load(), threads.size() * (threads.size() - 1));
		}
	};

	// Exercise all four combinations of chunk granularity and preallocation.
	SUBCASE("All items in one chunk, pre-allocated") {
		RID_OwnerTester tester(true, true);
		tester.test();
	}
	SUBCASE("All items in one chunk, NOT pre-allocated") {
		RID_OwnerTester tester(true, false);
		tester.test();
	}
	SUBCASE("One item per chunk, pre-allocated") {
		RID_OwnerTester tester(false, true);
		tester.test();
	}
	SUBCASE("One item per chunk, NOT pre-allocated") {
		RID_OwnerTester tester(false, false);
		tester.test();
	}
}
  214. } // namespace TestRID
  215. #endif // TEST_RID_H