// extended.odin — extended synchronization primitives for package sync2
  1. package sync2
  2. import "core:time"
// A Wait_Group waits for a collection of threads to finish
//
// A Wait_Group must not be copied after first use
Wait_Group :: struct {
	counter: int,   // outstanding task count; mutated under mutex via atomic_add
	mutex:   Mutex, // serializes add/wait and protects counter
	cond:    Cond,  // broadcast when counter reaches zero
}
// Adds delta (which may be negative) to the Wait_Group counter.
// When the counter reaches zero, all threads blocked in wait_group_wait are released.
// Panics if the counter goes negative.
wait_group_add :: proc(wg: ^Wait_Group, delta: int) {
	if delta == 0 {
		return
	}

	mutex_lock(&wg.mutex)
	defer mutex_unlock(&wg.mutex)

	atomic_add(&wg.counter, delta)
	if wg.counter < 0 {
		panic("sync.Wait_Group negative counter")
	}
	if wg.counter == 0 {
		cond_broadcast(&wg.cond)
		// After the broadcast the counter must still be zero; if it is not,
		// an add raced with a wait, which is documented misuse.
		if wg.counter != 0 {
			panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
		}
	}
}
// Decrements the Wait_Group counter by one, releasing waiters if it reaches zero.
wait_group_done :: proc(wg: ^Wait_Group) {
	wait_group_add(wg, -1)
}
// Blocks the calling thread until the Wait_Group counter drops to zero.
// NOTE(review): cond_wait is called once rather than in a loop, so a spurious
// wakeup while the counter is still non-zero would hit the misuse panic —
// TODO confirm this Cond implementation cannot wake spuriously.
wait_group_wait :: proc(wg: ^Wait_Group) {
	mutex_lock(&wg.mutex)
	defer mutex_unlock(&wg.mutex)

	if wg.counter != 0 {
		cond_wait(&wg.cond, &wg.mutex)
		if wg.counter != 0 {
			panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
		}
	}
}
// Blocks until the Wait_Group counter drops to zero or duration elapses.
// Returns true if the counter reached zero; false on timeout or when
// duration is non-positive.
wait_group_wait_with_timeout :: proc(wg: ^Wait_Group, duration: time.Duration) -> bool {
	if duration <= 0 {
		return false
	}

	mutex_lock(&wg.mutex)
	defer mutex_unlock(&wg.mutex)

	if wg.counter != 0 {
		if !cond_wait_with_timeout(&wg.cond, &wg.mutex, duration) {
			return false
		}
		// Same misuse detection as wait_group_wait: the counter must be zero
		// after a successful (non-timeout) wakeup.
		if wg.counter != 0 {
			panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
		}
	}
	return true
}
/*
A barrier enabling multiple threads to synchronize the beginning of some computation

Example:
	package example

	import "core:fmt"
	import "core:sync"
	import "core:thread"

	barrier := &sync.Barrier{}

	main :: proc() {
		fmt.println("Start")

		THREAD_COUNT :: 4
		threads: [THREAD_COUNT]^thread.Thread

		sync.barrier_init(barrier, THREAD_COUNT)

		for _, i in threads {
			threads[i] = thread.create_and_start(proc(t: ^thread.Thread) {
				// Same messages will be printed together but without any interleaving
				fmt.println("Getting ready!")
				sync.barrier_wait(barrier)
				fmt.println("Off their marks they go!")
			})
		}

		for t in threads {
			thread.destroy(t) // join and free thread
		}
		fmt.println("Finished")
	}
*/
Barrier :: struct {
	mutex: Mutex,
	cond:  Cond,
	index:         int, // threads that have arrived in the current generation
	generation_id: int, // bumped each time the barrier releases, enabling reuse
	thread_count:  int, // rendezvous size, set by barrier_init
}
  91. barrier_init :: proc(b: ^Barrier, thread_count: int) {
  92. b.index = 0
  93. b.generation_id = 0
  94. b.thread_count = thread_count
  95. }
// Block the current thread until all threads have rendezvoused
// Barrier can be reused after all threads rendezvoused once, and can be used continuously
// Exactly one thread per rendezvous returns true (the "leader").
barrier_wait :: proc(b: ^Barrier) -> (is_leader: bool) {
	mutex_lock(&b.mutex)
	defer mutex_unlock(&b.mutex)

	// The generation guard both tolerates spurious wakeups and lets the
	// barrier be reused immediately for the next rendezvous.
	local_gen := b.generation_id
	b.index += 1
	if b.index < b.thread_count {
		for local_gen == b.generation_id && b.index < b.thread_count {
			cond_wait(&b.cond, &b.mutex)
		}
		return false
	}

	// Last arrival: reset state for the next generation and release everyone.
	b.index = 0
	b.generation_id += 1
	cond_broadcast(&b.cond)
	return true
}
// An event that releases at most one waiter per signal and resets itself.
Auto_Reset_Event :: struct {
	// status == 0:  Event is reset and no threads are waiting
	// status == 1:  Event is signaled
	// status == -N: Event is reset and N threads are waiting
	status: i32,
	sema:   Sema, // waiters park here when the event is not yet signaled
}
  121. auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
  122. old_status := atomic_load_relaxed(&e.status)
  123. for {
  124. new_status := old_status + 1 if old_status < 1 else 1
  125. if _, ok := atomic_compare_exchange_weak_release(&e.status, old_status, new_status); ok {
  126. break
  127. }
  128. if old_status < 0 {
  129. sema_post(&e.sema)
  130. }
  131. }
  132. }
// Blocks until the event is signaled, consuming the signal.
// atomic_sub returns the PREVIOUS status: 1 means the event was already
// signaled (no blocking needed); 0 or negative means we must park.
auto_reset_event_wait :: proc(e: ^Auto_Reset_Event) {
	old_status := atomic_sub_acquire(&e.status, 1)
	if old_status < 1 {
		sema_wait(&e.sema)
	}
}
// A fair FIFO spinlock: threads acquire in the order they requested.
Ticket_Mutex :: struct {
	ticket:  uint, // next ticket number to hand out
	serving: uint, // ticket currently permitted to hold the lock
}
// Acquires the mutex: draw a ticket, then spin until it is being served.
// atomic_add returns the PREVIOUS value of m.ticket, i.e. our ticket number
// (the first locker gets 0, matching the zero-initialized serving field).
ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
	ticket := atomic_add_relaxed(&m.ticket, 1)
	for ticket != atomic_load_acquire(&m.serving) {
		cpu_relax() // ease contention while spinning
	}
}
// Releases the mutex by admitting the next ticket holder.
// NOTE(review): the increment uses relaxed ordering while lockers read
// serving with acquire — confirm a release store is not required here.
ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
	atomic_add_relaxed(&m.serving, 1)
}
// Scope guard: `if ticket_mutex_guard(&m) { ... }` holds the lock for the
// block; deferred_in runs ticket_mutex_unlock(m) when the scope exits.
@(deferred_in=ticket_mutex_unlock)
ticket_mutex_guard :: proc(m: ^Ticket_Mutex) -> bool {
	ticket_mutex_lock(m)
	return true
}
// A Benaphore is a lightweight mutex: an atomic counter that only touches
// the kernel semaphore when there is contention.
Benaphore :: struct {
	counter: i32,  // number of threads that have entered lock (0 = free)
	sema:    Sema, // contended threads block here
}
  161. benaphore_lock :: proc(b: ^Benaphore) {
  162. if atomic_add_acquire(&b.counter, 1) > 1 {
  163. sema_wait(&b.sema)
  164. }
  165. }
  166. benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
  167. v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0)
  168. return v == 0
  169. }
  170. benaphore_unlock :: proc(b: ^Benaphore) {
  171. if atomic_sub_release(&b.counter, 1) > 0 {
  172. sema_post(&b.sema)
  173. }
  174. }
// Scope guard: `if benaphore_guard(&m) { ... }` holds the benaphore for the
// block; deferred_in runs benaphore_unlock(m) when the scope exits.
@(deferred_in=benaphore_unlock)
benaphore_guard :: proc(m: ^Benaphore) -> bool {
	benaphore_lock(m)
	return true
}
// A Benaphore that may be re-acquired by the thread that already owns it.
Recursive_Benaphore :: struct {
	counter:   int,  // total outstanding acquisitions across all threads
	owner:     int,  // thread id of the current holder (0 when free)
	recursion: i32,  // times the owner has acquired without releasing
	sema:      Sema, // non-owner threads block here
}
  186. recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
  187. tid := current_thread_id()
  188. if atomic_add_acquire(&b.counter, 1) > 1 {
  189. if tid != b.owner {
  190. sema_wait(&b.sema)
  191. }
  192. }
  193. // inside the lock
  194. b.owner = tid
  195. b.recursion += 1
  196. }
  197. recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
  198. tid := current_thread_id()
  199. if b.owner == tid {
  200. atomic_add_acquire(&b.counter, 1)
  201. }
  202. if v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0); v != 0 {
  203. return false
  204. }
  205. // inside the lock
  206. b.owner = tid
  207. b.recursion += 1
  208. return true
  209. }
  210. recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
  211. tid := current_thread_id()
  212. assert(tid == b.owner)
  213. b.recursion -= 1
  214. recursion := b.recursion
  215. if recursion == 0 {
  216. b.owner = 0
  217. }
  218. if atomic_sub_release(&b.counter, 1) > 0 {
  219. if recursion == 0 {
  220. sema_post(&b.sema)
  221. }
  222. }
  223. // outside the lock
  224. }
// Scope guard: `if recursive_benaphore_guard(&m) { ... }` holds the lock for
// the block; deferred_in runs recursive_benaphore_unlock(m) on scope exit.
@(deferred_in=recursive_benaphore_unlock)
recursive_benaphore_guard :: proc(m: ^Recursive_Benaphore) -> bool {
	recursive_benaphore_lock(m)
	return true
}
// Once is a data value that will perform exactly one action.
//
// A Once must not be copied after first use.
Once :: struct {
	m:    Mutex, // serializes the first (slow-path) call
	done: bool,  // set with release ordering once fn has completed
}
// once_do calls the procedure fn if and only if once_do is being called for
// the first time for this instance of Once.
// Fast path: one acquire load of done. Slow path: take the mutex and
// re-check done (double-checked locking), so concurrent callers block until
// the first call to fn has completed.
once_do :: proc(o: ^Once, fn: proc()) {
	@(cold)
	do_slow :: proc(o: ^Once, fn: proc()) {
		mutex_lock(&o.m)
		defer mutex_unlock(&o.m)
		if !o.done {
			fn()
			// Publish completion only after fn has fully run.
			atomic_store_release(&o.done, true)
		}
	}

	if atomic_load_acquire(&o.done) == false {
		do_slow(o, fn)
	}
}