extended.odin

package sync2

// A Wait_Group waits for a collection of threads to finish
//
// A Wait_Group must not be copied after first use
Wait_Group :: struct {
	counter: int,
	mutex:   Mutex,
	cond:    Cond,
}
wait_group_add :: proc(wg: ^Wait_Group, delta: int) {
	if delta == 0 {
		return;
	}

	mutex_lock(&wg.mutex);
	defer mutex_unlock(&wg.mutex);

	atomic_add(&wg.counter, delta);
	if wg.counter < 0 {
		panic("sync.Wait_Group negative counter");
	}
	if wg.counter == 0 {
		cond_broadcast(&wg.cond);
		if wg.counter != 0 {
			panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait");
		}
	}
}
wait_group_done :: proc(wg: ^Wait_Group) {
	wait_group_add(wg, -1);
}

wait_group_wait :: proc(wg: ^Wait_Group) {
	mutex_lock(&wg.mutex);
	defer mutex_unlock(&wg.mutex);

	if wg.counter != 0 {
		cond_wait(&wg.cond, &wg.mutex);
		if wg.counter != 0 {
			panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait");
		}
	}
}
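
// A minimal usage sketch, not part of the original file; it assumes this
// package is imported as "sync", following the Barrier example below:
/*
 * Example:
 *
 *	package example
 *
 *	import "core:fmt"
 *	import "core:sync"
 *	import "core:thread"
 *
 *	wg := &sync.Wait_Group{};
 *
 *	main :: proc() {
 *		TASK_COUNT :: 3;
 *		tasks: [TASK_COUNT]^thread.Thread;
 *
 *		sync.wait_group_add(wg, TASK_COUNT);
 *
 *		for _, i in tasks {
 *			tasks[i] = thread.create_and_start(proc(t: ^thread.Thread) {
 *				fmt.println("task done");
 *				sync.wait_group_done(wg);
 *			});
 *		}
 *
 *		sync.wait_group_wait(wg); // blocks until the counter drops back to zero
 *		fmt.println("all tasks finished");
 *
 *		for t in tasks {
 *			thread.destroy(t); // join and free thread
 *		}
 *	}
 */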
// A barrier enabling multiple threads to synchronize the beginning of some computation
/*
 * Example:
 *
 *	package example
 *
 *	import "core:fmt"
 *	import "core:sync"
 *	import "core:thread"
 *
 *	barrier := &sync.Barrier{};
 *
 *	main :: proc() {
 *		fmt.println("Start");
 *
 *		THREAD_COUNT :: 4;
 *		threads: [THREAD_COUNT]^thread.Thread;
 *
 *		sync.barrier_init(barrier, THREAD_COUNT);
 *		defer sync.barrier_destroy(barrier);
 *
 *		for _, i in threads {
 *			threads[i] = thread.create_and_start(proc(t: ^thread.Thread) {
 *				// Same messages will be printed together but without any interleaving
 *				fmt.println("Getting ready!");
 *				sync.barrier_wait(barrier);
 *				fmt.println("Off their marks they go!");
 *			});
 *		}
 *
 *		for t in threads {
 *			thread.destroy(t); // join and free thread
 *		}
 *		fmt.println("Finished");
 *	}
 */
Barrier :: struct {
	mutex:         Mutex,
	cond:          Cond,
	index:         int,
	generation_id: int,
	thread_count:  int,
}

barrier_init :: proc(b: ^Barrier, thread_count: int) {
	b.index = 0;
	b.generation_id = 0;
	b.thread_count = thread_count;
}
// Block the current thread until all threads have rendezvoused
// A Barrier can be reused after all threads have rendezvoused once, and can be used continuously
barrier_wait :: proc(b: ^Barrier) -> (is_leader: bool) {
	mutex_lock(&b.mutex);
	defer mutex_unlock(&b.mutex);

	local_gen := b.generation_id;
	b.index += 1;
	if b.index < b.thread_count {
		// Not the last thread to arrive: wait until this generation completes
		for local_gen == b.generation_id && b.index < b.thread_count {
			cond_wait(&b.cond, &b.mutex);
		}
		return false;
	}

	// Last thread to arrive: begin a new generation and wake everyone
	b.index = 0;
	b.generation_id += 1;
	cond_broadcast(&b.cond);
	return true;
}
Auto_Reset_Event :: struct {
	// status ==  0: Event is reset and no threads are waiting
	// status ==  1: Event is signaled
	// status == -N: Event is reset and N threads are waiting
	status: i32,
	sema:   Sema,
}
auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
	old_status := atomic_load_relaxed(&e.status);
	for {
		new_status := old_status + 1 if old_status < 1 else 1;
		actual, ok := atomic_compare_exchange_weak_release(&e.status, old_status, new_status);
		if ok {
			break;
		}
		// CAS failed: retry with the value another thread just stored
		old_status = actual;
	}
	if old_status < 0 {
		// At least one thread was waiting; release exactly one of them
		sema_post(&e.sema);
	}
}
auto_reset_event_wait :: proc(e: ^Auto_Reset_Event) {
	old_status := atomic_sub_acquire(&e.status, 1);
	if old_status < 1 {
		// The event was not signaled: block until a signal arrives
		sema_wait(&e.sema);
	}
}
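
// A minimal usage sketch, not part of the original file; it assumes this
// package is imported as "sync", following the style of the Barrier example:
/*
 * Example:
 *
 *	package example
 *
 *	import "core:fmt"
 *	import "core:sync"
 *	import "core:thread"
 *
 *	event := &sync.Auto_Reset_Event{};
 *
 *	main :: proc() {
 *		t := thread.create_and_start(proc(t: ^thread.Thread) {
 *			sync.auto_reset_event_wait(event); // blocks until a signal arrives
 *			fmt.println("woken; the event resets itself afterwards");
 *		});
 *
 *		sync.auto_reset_event_signal(event); // releases at most one waiter
 *		thread.destroy(t); // join and free thread
 *	}
 */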
// A Ticket_Mutex is a fair, FIFO mutex: each thread takes a ticket and is
// admitted in ticket order
Ticket_Mutex :: struct {
	ticket:  uint,
	serving: uint,
}

ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
	// atomic_add returns the previous value, i.e. the ticket this thread drew
	ticket := atomic_add_relaxed(&m.ticket, 1);
	for ticket != atomic_load_acquire(&m.serving) {
		cpu_relax();
	}
}

ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
	// Release so the critical section's writes are visible to the next holder
	atomic_add_release(&m.serving, 1);
}
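
// A minimal usage sketch, not part of the original file; the global names
// here are illustrative only:
/*
 * Example:
 *
 *	m: sync.Ticket_Mutex;
 *	counter: int;
 *
 *	increment :: proc() {
 *		sync.ticket_mutex_lock(&m);
 *		defer sync.ticket_mutex_unlock(&m);
 *		counter += 1; // threads enter in the order they drew their tickets
 *	}
 */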
// A Benaphore couples an atomic counter with a semaphore so the semaphore is
// only touched when the lock is contended
Benaphore :: struct {
	counter: i32,
	sema:    Sema,
}

benaphore_lock :: proc(b: ^Benaphore) {
	// Assuming atomic_add returns the previous value (as the Odin intrinsics do),
	// non-zero means the lock was already held
	if atomic_add_acquire(&b.counter, 1) > 0 {
		sema_wait(&b.sema);
	}
}

benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
	v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 0, 1);
	return v == 0;
}

benaphore_unlock :: proc(b: ^Benaphore) {
	// A previous value greater than 1 means at least one thread is waiting
	if atomic_sub_release(&b.counter, 1) > 1 {
		sema_post(&b.sema);
	}
}
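
// A minimal usage sketch, not part of the original file:
/*
 * Example:
 *
 *	b: sync.Benaphore;
 *
 *	with_lock :: proc() {
 *		sync.benaphore_lock(&b);
 *		defer sync.benaphore_unlock(&b);
 *		// exclusive work; the semaphore is only used under contention
 *	}
 *
 *	with_try_lock :: proc() -> bool {
 *		if !sync.benaphore_try_lock(&b) {
 *			return false;
 *		}
 *		defer sync.benaphore_unlock(&b);
 *		// exclusive work
 *		return true;
 *	}
 */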
Recursive_Benaphore :: struct {
	counter:   int,
	owner:     int,
	recursion: i32,
	sema:      Sema,
}

recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
	tid := current_thread_id();
	// A previous value greater than zero means the lock is held, possibly by
	// this same thread; only block when another thread owns it
	if atomic_add_acquire(&b.counter, 1) > 0 {
		if tid != b.owner {
			sema_wait(&b.sema);
		}
	}
	// inside the lock
	b.owner = tid;
	b.recursion += 1;
}
recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
	tid := current_thread_id();
	if b.owner == tid {
		// Recursive acquire by the current owner always succeeds
		atomic_add_acquire(&b.counter, 1);
	} else {
		v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 0, 1);
		if v != 0 {
			return false;
		}
	}
	// inside the lock
	b.owner = tid;
	b.recursion += 1;
	return true;
}
recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
	tid := current_thread_id();
	assert(tid == b.owner);

	b.recursion -= 1;
	recursion := b.recursion;
	if recursion == 0 {
		b.owner = 0;
	}
	// A previous value greater than 1 means other threads are waiting; only
	// wake one once the outermost lock has been released
	if atomic_sub_release(&b.counter, 1) > 1 {
		if recursion == 0 {
			sema_post(&b.sema);
		}
	}
	// outside the lock
}
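
// A minimal usage sketch, not part of the original file; it shows that the
// owning thread may re-acquire the lock without deadlocking:
/*
 * Example:
 *
 *	rb: sync.Recursive_Benaphore;
 *
 *	outer :: proc() {
 *		sync.recursive_benaphore_lock(&rb);
 *		defer sync.recursive_benaphore_unlock(&rb);
 *		inner(); // nested acquire on the same thread is fine
 *	}
 *
 *	inner :: proc() {
 *		sync.recursive_benaphore_lock(&rb);
 *		defer sync.recursive_benaphore_unlock(&rb);
 *	}
 */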
Once :: struct {
	m:    Mutex,
	done: bool,
}

once_do :: proc(o: ^Once, fn: proc()) {
	// Fast path: fn has already run
	if atomic_load_acquire(&o.done) == false {
		_once_do_slow(o, fn);
	}
}

@(cold)
_once_do_slow :: proc(o: ^Once, fn: proc()) {
	mutex_lock(&o.m);
	defer mutex_unlock(&o.m);
	// Re-check under the mutex in case another thread won the race
	if !o.done {
		fn();
		atomic_store_release(&o.done, true);
	}
}
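
// A minimal usage sketch, not part of the original file; "table" and
// "get_table" are illustrative names, not APIs of this package:
/*
 * Example:
 *
 *	once: sync.Once;
 *	table: map[string]int;
 *
 *	get_table :: proc() -> map[string]int {
 *		sync.once_do(&once, proc() {
 *			table = make(map[string]int); // runs exactly once across all threads
 *		});
 *		return table;
 *	}
 */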