extended.odin 6.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307
  1. package sync2
  2. import "core:time"
  3. // A Wait_Group waits for a collection of threads to finish
  4. //
  5. // A Wait_Group must not be copied after first use
  6. Wait_Group :: struct {
  7. counter: int,
  8. mutex: Mutex,
  9. cond: Cond,
  10. }
  11. wait_group_add :: proc(wg: ^Wait_Group, delta: int) {
  12. if delta == 0 {
  13. return
  14. }
  15. mutex_lock(&wg.mutex)
  16. defer mutex_unlock(&wg.mutex)
  17. atomic_add(&wg.counter, delta)
  18. if wg.counter < 0 {
  19. panic("sync.Wait_Group negative counter")
  20. }
  21. if wg.counter == 0 {
  22. cond_broadcast(&wg.cond)
  23. if wg.counter != 0 {
  24. panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
  25. }
  26. }
  27. }
  28. wait_group_done :: proc(wg: ^Wait_Group) {
  29. wait_group_add(wg, -1)
  30. }
  31. wait_group_wait :: proc(wg: ^Wait_Group) {
  32. mutex_lock(&wg.mutex)
  33. defer mutex_unlock(&wg.mutex)
  34. if wg.counter != 0 {
  35. cond_wait(&wg.cond, &wg.mutex)
  36. if wg.counter != 0 {
  37. panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
  38. }
  39. }
  40. }
  41. wait_group_wait_with_timeout :: proc(wg: ^Wait_Group, duration: time.Duration) -> bool {
  42. if duration <= 0 {
  43. return false
  44. }
  45. mutex_lock(&wg.mutex)
  46. defer mutex_unlock(&wg.mutex)
  47. if wg.counter != 0 {
  48. if !cond_wait_with_timeout(&wg.cond, &wg.mutex, duration) {
  49. return false
  50. }
  51. if wg.counter != 0 {
  52. panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
  53. }
  54. }
  55. return true
  56. }
  57. // A barrier enabling multiple threads to synchronize the beginning of some computation
  58. /*
  59. * Example:
  60. *
  61. * package example
  62. *
  63. * import "core:fmt"
  64. * import "core:sync"
  65. * import "core:thread"
  66. *
  67. * barrier := &sync.Barrier{}
  68. *
  69. * main :: proc() {
  70. * fmt.println("Start")
  71. *
  72. * THREAD_COUNT :: 4
  73. * threads: [THREAD_COUNT]^thread.Thread
  74. *
  75. * sync.barrier_init(barrier, THREAD_COUNT)
  76. * defer sync.barrier_destroy(barrier)
  77. *
  78. *
  79. * for _, i in threads {
  80. * threads[i] = thread.create_and_start(proc(t: ^thread.Thread) {
  81. * // Same messages will be printed together but without any interleaving
  82. * fmt.println("Getting ready!")
  83. * sync.barrier_wait(barrier)
  84. * fmt.println("Off their marks they go!")
  85. * })
  86. * }
  87. *
  88. * for t in threads {
  89. * thread.destroy(t) // join and free thread
  90. * }
  91. * fmt.println("Finished")
  92. * }
  93. *
  94. */
  95. Barrier :: struct {
  96. mutex: Mutex,
  97. cond: Cond,
  98. index: int,
  99. generation_id: int,
  100. thread_count: int,
  101. }
  102. barrier_init :: proc(b: ^Barrier, thread_count: int) {
  103. b.index = 0
  104. b.generation_id = 0
  105. b.thread_count = thread_count
  106. }
  107. // Block the current thread until all threads have rendezvoused
  108. // Barrier can be reused after all threads rendezvoused once, and can be used continuously
  109. barrier_wait :: proc(b: ^Barrier) -> (is_leader: bool) {
  110. mutex_lock(&b.mutex)
  111. defer mutex_unlock(&b.mutex)
  112. local_gen := b.generation_id
  113. b.index += 1
  114. if b.index < b.thread_count {
  115. for local_gen == b.generation_id && b.index < b.thread_count {
  116. cond_wait(&b.cond, &b.mutex)
  117. }
  118. return false
  119. }
  120. b.index = 0
  121. b.generation_id += 1
  122. cond_broadcast(&b.cond)
  123. return true
  124. }
  125. Auto_Reset_Event :: struct {
  126. // status == 0: Event is reset and no threads are waiting
  127. // status == 1: Event is signaled
  128. // status == -N: Event is reset and N threads are waiting
  129. status: i32,
  130. sema: Sema,
  131. }
  132. auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
  133. old_status := atomic_load_relaxed(&e.status)
  134. for {
  135. new_status := old_status + 1 if old_status < 1 else 1
  136. if _, ok := atomic_compare_exchange_weak_release(&e.status, old_status, new_status); ok {
  137. break
  138. }
  139. if old_status < 0 {
  140. sema_post(&e.sema)
  141. }
  142. }
  143. }
  144. auto_reset_event_wait :: proc(e: ^Auto_Reset_Event) {
  145. old_status := atomic_sub_acquire(&e.status, 1)
  146. if old_status < 1 {
  147. sema_wait(&e.sema)
  148. }
  149. }
  150. Ticket_Mutex :: struct {
  151. ticket: uint,
  152. serving: uint,
  153. }
  154. ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
  155. ticket := atomic_add_relaxed(&m.ticket, 1)
  156. for ticket != atomic_load_acquire(&m.serving) {
  157. cpu_relax()
  158. }
  159. }
  160. ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
  161. atomic_add_relaxed(&m.serving, 1)
  162. }
  163. @(deferred_in=ticket_mutex_unlock)
  164. ticket_mutex_guard :: proc(m: ^Ticket_Mutex) -> bool {
  165. ticket_mutex_lock(m)
  166. return true
  167. }
  168. Benaphore :: struct {
  169. counter: i32,
  170. sema: Sema,
  171. }
  172. benaphore_lock :: proc(b: ^Benaphore) {
  173. if atomic_add_acquire(&b.counter, 1) > 1 {
  174. sema_wait(&b.sema)
  175. }
  176. }
  177. benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
  178. v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0)
  179. return v == 0
  180. }
  181. benaphore_unlock :: proc(b: ^Benaphore) {
  182. if atomic_sub_release(&b.counter, 1) > 0 {
  183. sema_post(&b.sema)
  184. }
  185. }
  186. @(deferred_in=benaphore_unlock)
  187. benaphore_guard :: proc(m: ^Benaphore) -> bool {
  188. benaphore_lock(m)
  189. return true
  190. }
  191. Recursive_Benaphore :: struct {
  192. counter: int,
  193. owner: int,
  194. recursion: i32,
  195. sema: Sema,
  196. }
  197. recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
  198. tid := current_thread_id()
  199. if atomic_add_acquire(&b.counter, 1) > 1 {
  200. if tid != b.owner {
  201. sema_wait(&b.sema)
  202. }
  203. }
  204. // inside the lock
  205. b.owner = tid
  206. b.recursion += 1
  207. }
  208. recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
  209. tid := current_thread_id()
  210. if b.owner == tid {
  211. atomic_add_acquire(&b.counter, 1)
  212. }
  213. if v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0); v != 0 {
  214. return false
  215. }
  216. // inside the lock
  217. b.owner = tid
  218. b.recursion += 1
  219. return true
  220. }
  221. recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
  222. tid := current_thread_id()
  223. assert(tid == b.owner)
  224. b.recursion -= 1
  225. recursion := b.recursion
  226. if recursion == 0 {
  227. b.owner = 0
  228. }
  229. if atomic_sub_release(&b.counter, 1) > 0 {
  230. if recursion == 0 {
  231. sema_post(&b.sema)
  232. }
  233. }
  234. // outside the lock
  235. }
  236. @(deferred_in=recursive_benaphore_unlock)
  237. recursive_benaphore_guard :: proc(m: ^Recursive_Benaphore) -> bool {
  238. recursive_benaphore_lock(m)
  239. return true
  240. }
  241. // Once is a data value that will perform exactly on action.
  242. //
  243. // A Once must not be copied after first use.
  244. Once :: struct {
  245. m: Mutex,
  246. done: bool,
  247. }
  248. // once_do calls the procedure fn if and only if once_do is being called for the first for this instance of Once.
  249. once_do :: proc(o: ^Once, fn: proc()) {
  250. @(cold)
  251. do_slow :: proc(o: ^Once, fn: proc()) {
  252. mutex_lock(&o.m)
  253. defer mutex_unlock(&o.m)
  254. if !o.done {
  255. fn()
  256. atomic_store_release(&o.done, true)
  257. }
  258. }
  259. if atomic_load_acquire(&o.done) == false {
  260. do_slow(o, fn)
  261. }
  262. }