// extended.odin — additional synchronization primitives for the sync package.
  1. package sync
  2. import "core:time"
// A Wait_Group waits for a collection of threads to finish.
//
// A Wait_Group must not be copied after first use.
Wait_Group :: struct {
	counter: int,   // outstanding units of work; waiters block until it reaches 0
	mutex:   Mutex, // serializes counter updates and the wait path
	cond:    Cond,  // broadcast when counter reaches 0
}
// Adds `delta` (which may be negative) to the Wait_Group counter.
//
// Panics if the counter becomes negative, or if the counter is observed
// non-zero again immediately after the zero-broadcast (which indicates
// wait_group_add racing with wait_group_wait).
wait_group_add :: proc(wg: ^Wait_Group, delta: int) {
	if delta == 0 {
		return
	}

	guard(&wg.mutex)

	atomic_add(&wg.counter, delta)
	if wg.counter < 0 {
		panic("sync.Wait_Group negative counter")
	}
	if wg.counter == 0 {
		// Counter hit zero: release every waiting thread.
		cond_broadcast(&wg.cond)
		if wg.counter != 0 {
			panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
		}
	}
}
  27. wait_group_done :: proc(wg: ^Wait_Group) {
  28. wait_group_add(wg, -1)
  29. }
  30. wait_group_wait :: proc(wg: ^Wait_Group) {
  31. guard(&wg.mutex)
  32. if wg.counter != 0 {
  33. cond_wait(&wg.cond, &wg.mutex)
  34. if wg.counter != 0 {
  35. panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
  36. }
  37. }
  38. }
  39. wait_group_wait_with_timeout :: proc(wg: ^Wait_Group, duration: time.Duration) -> bool {
  40. if duration <= 0 {
  41. return false
  42. }
  43. guard(&wg.mutex)
  44. if wg.counter != 0 {
  45. if !cond_wait_with_timeout(&wg.cond, &wg.mutex, duration) {
  46. return false
  47. }
  48. if wg.counter != 0 {
  49. panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
  50. }
  51. }
  52. return true
  53. }
  54. /*
  55. A barrier enabling multiple threads to synchronize the beginning of some computation
  56. Example:
  57. package example
  58. import "core:fmt"
  59. import "core:sync"
  60. import "core:thread"
  61. barrier := &sync.Barrier{}
  62. main :: proc() {
  63. fmt.println("Start")
  64. THREAD_COUNT :: 4
  65. threads: [THREAD_COUNT]^thread.Thread
  66. sync.barrier_init(barrier, THREAD_COUNT)
  67. for _, i in threads {
  68. threads[i] = thread.create_and_start(proc(t: ^thread.Thread) {
  69. // Same messages will be printed together but without any interleaving
  70. fmt.println("Getting ready!")
  71. sync.barrier_wait(barrier)
  72. fmt.println("Off their marks they go!")
  73. })
  74. }
  75. for t in threads {
  76. thread.destroy(t) // join and free thread
  77. }
  78. fmt.println("Finished")
  79. }
  80. */
// See the example above: all fields are internal state; initialize with
// barrier_init before use.
Barrier :: struct {
	mutex:         Mutex, // protects all fields below
	cond:          Cond,  // non-leader threads wait here until released
	index:         int,   // number of threads that have arrived this generation
	generation_id: int,   // bumped on each release; lets waiters detect their generation ended
	thread_count:  int,   // arrivals required before the barrier opens
}
  88. barrier_init :: proc(b: ^Barrier, thread_count: int) {
  89. b.index = 0
  90. b.generation_id = 0
  91. b.thread_count = thread_count
  92. }
// Block the current thread until all threads have rendezvoused.
// Barrier can be reused after all threads rendezvoused once, and can be used continuously.
//
// Returns true in exactly one thread per generation (the last to
// arrive, the "leader") and false in all others.
barrier_wait :: proc(b: ^Barrier) -> (is_leader: bool) {
	guard(&b.mutex)
	local_gen := b.generation_id
	b.index += 1
	if b.index < b.thread_count {
		// Not the last arrival: sleep until the leader starts the next
		// generation. The generation check guards against spurious wakeups
		// and against counting into a later generation.
		for local_gen == b.generation_id && b.index < b.thread_count {
			cond_wait(&b.cond, &b.mutex)
		}
		return false
	}

	// Last arrival: reset for reuse, open a new generation, wake everyone.
	b.index = 0
	b.generation_id += 1
	cond_broadcast(&b.cond)
	return true
}
// Auto_Reset_Event releases one waiting thread per signal and resets
// itself automatically once that signal is consumed.
Auto_Reset_Event :: struct {
	// status == 0: Event is reset and no threads are waiting
	// status == 1: Event is signaled
	// status == -N: Event is reset and N threads are waiting
	status: i32,
	sema:   Sema, // blocks waiting threads when no signal is pending
}
  117. auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
  118. old_status := atomic_load_explicit(&e.status, .Relaxed)
  119. for {
  120. new_status := old_status + 1 if old_status < 1 else 1
  121. if _, ok := atomic_compare_exchange_weak_explicit(&e.status, old_status, new_status, .Release, .Relaxed); ok {
  122. break
  123. }
  124. if old_status < 0 {
  125. sema_post(&e.sema)
  126. }
  127. }
  128. }
  129. auto_reset_event_wait :: proc(e: ^Auto_Reset_Event) {
  130. old_status := atomic_sub_explicit(&e.status, 1, .Acquire)
  131. if old_status < 1 {
  132. sema_wait(&e.sema)
  133. }
  134. }
// Ticket_Mutex is a fair, FIFO spin lock: each locker takes a ticket
// number and spins until `serving` reaches it.
Ticket_Mutex :: struct {
	ticket:  uint, // next ticket number to hand out
	serving: uint, // ticket currently allowed to hold the lock
}
  139. ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
  140. ticket := atomic_add_explicit(&m.ticket, 1, .Relaxed)
  141. for ticket != atomic_load_explicit(&m.serving, .Acquire) {
  142. cpu_relax()
  143. }
  144. }
  145. ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
  146. atomic_add_explicit(&m.serving, 1, .Relaxed)
  147. }
  148. @(deferred_in=ticket_mutex_unlock)
  149. ticket_mutex_guard :: proc(m: ^Ticket_Mutex) -> bool {
  150. ticket_mutex_lock(m)
  151. return true
  152. }
// Benaphore: a lightweight mutex that touches the semaphore only under
// contention; the uncontended path is a single atomic on `counter`.
Benaphore :: struct {
	counter: i32,  // acquisitions outstanding (holder + waiters)
	sema:    Sema, // blocks contending threads
}
  157. benaphore_lock :: proc(b: ^Benaphore) {
  158. if atomic_add_explicit(&b.counter, 1, .Acquire) > 1 {
  159. sema_wait(&b.sema)
  160. }
  161. }
  162. benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
  163. v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 0, 1, .Acquire, .Acquire)
  164. return v == 0
  165. }
  166. benaphore_unlock :: proc(b: ^Benaphore) {
  167. if atomic_sub_explicit(&b.counter, 1, .Release) > 0 {
  168. sema_post(&b.sema)
  169. }
  170. }
  171. @(deferred_in=benaphore_unlock)
  172. benaphore_guard :: proc(m: ^Benaphore) -> bool {
  173. benaphore_lock(m)
  174. return true
  175. }
// Recursive_Benaphore: a benaphore that its owning thread may
// re-acquire without deadlocking.
Recursive_Benaphore :: struct {
	counter:   int,  // total outstanding acquisitions across all threads
	owner:     int,  // thread id of the current holder; 0 when unowned
	recursion: i32,  // times the owner has acquired without releasing
	sema:      Sema, // blocks contending threads
}
  182. recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
  183. tid := current_thread_id()
  184. if atomic_add_explicit(&b.counter, 1, .Acquire) > 1 {
  185. if tid != b.owner {
  186. sema_wait(&b.sema)
  187. }
  188. }
  189. // inside the lock
  190. b.owner = tid
  191. b.recursion += 1
  192. }
  193. recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
  194. tid := current_thread_id()
  195. if b.owner == tid {
  196. atomic_add_explicit(&b.counter, 1, .Acquire)
  197. }
  198. if v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 0, 1, .Acquire, .Acquire); v != 0 {
  199. return false
  200. }
  201. // inside the lock
  202. b.owner = tid
  203. b.recursion += 1
  204. return true
  205. }
  206. recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
  207. tid := current_thread_id()
  208. assert(tid == b.owner)
  209. b.recursion -= 1
  210. recursion := b.recursion
  211. if recursion == 0 {
  212. b.owner = 0
  213. }
  214. if atomic_sub_explicit(&b.counter, 1, .Release) > 0 {
  215. if recursion == 0 {
  216. sema_post(&b.sema)
  217. }
  218. }
  219. // outside the lock
  220. }
  221. @(deferred_in=recursive_benaphore_unlock)
  222. recursive_benaphore_guard :: proc(m: ^Recursive_Benaphore) -> bool {
  223. recursive_benaphore_lock(m)
  224. return true
  225. }
// Once is a data value that will perform exactly one action.
//
// A Once must not be copied after first use.
Once :: struct {
	m:    Mutex, // serializes first-call contenders in once_do
	done: bool,  // set with .Release once the action has run
}
// once_do calls the procedure fn if and only if once_do is being called
// for the first time for this instance of Once.
once_do :: proc(o: ^Once, fn: proc()) {
	// Slow path: take the mutex and re-check under it (double-checked
	// locking); only the first caller actually runs fn.
	@(cold)
	do_slow :: proc(o: ^Once, fn: proc()) {
		guard(&o.m)
		if !o.done {
			fn()
			// Publish completion; pairs with the .Acquire load below so
			// later callers see fn's effects.
			atomic_store_explicit(&o.done, true, .Release)
		}
	}

	// Fast path: once fn has run, this is a single acquire load.
	if atomic_load_explicit(&o.done, .Acquire) == false {
		do_slow(o, fn)
	}
}
// A Parker is an associated token which is initially not present:
// * The `park` procedure blocks the current thread unless or until the token
//   is available, at which point the token is consumed.
// * The `park_with_timeout` procedure works the same as `park` but only
//   blocks for the specified duration.
// * The `unpark` procedure atomically makes the token available if it
//   was not already.
Parker :: struct {
	state: Futex, // EMPTY (0), NOTIFIED (1), or PARKED (max(u32)); see park/unpark
}
// Blocks the current thread until the token is made available.
//
// Assumes this is only called by the thread that owns the Parker.
park :: proc(p: ^Parker) {
	EMPTY    :: 0
	NOTIFIED :: 1
	PARKED   :: max(u32)
	// Decrement: NOTIFIED -> EMPTY (token consumed, return at once);
	// EMPTY -> PARKED (0 - 1 wraps to max(u32)).
	if atomic_sub_explicit(&p.state, 1, .Acquire) == NOTIFIED {
		return
	}
	for {
		// Sleep while the state is still PARKED. futex_wait may return
		// spuriously, so the state is re-checked every iteration.
		futex_wait(&p.state, PARKED)
		// Consume the token only if unpark has set NOTIFIED.
		if _, ok := atomic_compare_exchange_strong_explicit(&p.state, NOTIFIED, EMPTY, .Acquire, .Acquire); ok {
			return
		}
	}
}
// Blocks the current thread until the token is made available, but only
// for a limited duration.
//
// Assumes this is only called by the thread that owns the Parker.
park_with_timeout :: proc(p: ^Parker, duration: time.Duration) {
	EMPTY    :: 0
	NOTIFIED :: 1
	PARKED   :: max(u32)
	// If a token was already available (NOTIFIED), consume it and return.
	if atomic_sub_explicit(&p.state, 1, .Acquire) == NOTIFIED {
		return
	}
	// Wait at most `duration`, then unconditionally reset to EMPTY —
	// whether we were woken by unpark or timed out, any token present is
	// consumed here.
	futex_wait_with_timeout(&p.state, PARKED, duration)
	atomic_exchange_explicit(&p.state, EMPTY, .Acquire)
}
// Atomically makes the token available if it was not already.
unpark :: proc(p: ^Parker) {
	EMPTY    :: 0
	NOTIFIED :: 1
	PARKED   :: max(Futex)
	// Publish the token; if the owner thread was parked, wake it.
	if atomic_exchange_explicit(&p.state, NOTIFIED, .Release) == PARKED {
		futex_signal(&p.state)
	}
}