// extended.odin — extended synchronization primitives for package sync
package sync

import "core:time"
import vg "core:sys/valgrind"
_ :: vg
// A Wait_Group waits for a collection of threads to finish
//
// A Wait_Group must not be copied after first use
Wait_Group :: struct #no_copy {
	counter: int,   // number of outstanding tasks; modified under `mutex`
	mutex:   Mutex, // protects counter transitions and the wait condition
	cond:    Cond,  // broadcast when `counter` reaches zero
}
// wait_group_add adds `delta` (which may be negative) to the counter.
// When the counter reaches zero, all threads blocked in wait_group_wait
// are released. A counter that goes negative is a fatal usage error.
wait_group_add :: proc "contextless" (wg: ^Wait_Group, delta: int) {
	if delta == 0 {
		return
	}

	guard(&wg.mutex)

	atomic_add(&wg.counter, delta)
	if wg.counter < 0 {
		_panic("sync.Wait_Group negative counter")
	}
	if wg.counter == 0 {
		cond_broadcast(&wg.cond)
		// Re-check after the broadcast: a concurrent add at this point
		// indicates the Wait_Group is being reused while threads still wait.
		if wg.counter != 0 {
			_panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
		}
	}
}
// wait_group_done decrements the counter by one, signalling that a task
// previously registered with wait_group_add has completed.
wait_group_done :: proc "contextless" (wg: ^Wait_Group) {
	wait_group_add(wg, -1)
}
  32. wait_group_wait :: proc "contextless" (wg: ^Wait_Group) {
  33. guard(&wg.mutex)
  34. if wg.counter != 0 {
  35. cond_wait(&wg.cond, &wg.mutex)
  36. if wg.counter != 0 {
  37. _panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
  38. }
  39. }
  40. }
  41. wait_group_wait_with_timeout :: proc "contextless" (wg: ^Wait_Group, duration: time.Duration) -> bool {
  42. if duration <= 0 {
  43. return false
  44. }
  45. guard(&wg.mutex)
  46. if wg.counter != 0 {
  47. if !cond_wait_with_timeout(&wg.cond, &wg.mutex, duration) {
  48. return false
  49. }
  50. if wg.counter != 0 {
  51. _panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
  52. }
  53. }
  54. return true
  55. }
/*
A barrier enabling multiple threads to synchronize the beginning of some computation

Example:
	package example

	import "core:fmt"
	import "core:sync"
	import "core:thread"

	barrier := &sync.Barrier{}

	main :: proc() {
		fmt.println("Start")

		THREAD_COUNT :: 4
		threads: [THREAD_COUNT]^thread.Thread

		sync.barrier_init(barrier, THREAD_COUNT)

		for _, i in threads {
			threads[i] = thread.create_and_start(proc(t: ^thread.Thread) {
				// Same messages will be printed together but without any interleaving
				fmt.println("Getting ready!")
				sync.barrier_wait(barrier)
				fmt.println("Off their marks they go!")
			})
		}

		for t in threads {
			thread.destroy(t) // join and free thread
		}
		fmt.println("Finished")
	}
*/
// A Barrier must not be copied after first use.
Barrier :: struct #no_copy {
	mutex: Mutex, // protects all fields below
	cond:  Cond,  // signalled when the last thread of a generation arrives
	index: int,          // number of threads arrived in the current generation
	generation_id: int,  // incremented each time the barrier releases
	thread_count: int,   // threads required per rendezvous
}
  90. barrier_init :: proc "contextless" (b: ^Barrier, thread_count: int) {
  91. when ODIN_VALGRIND_SUPPORT {
  92. vg.helgrind_barrier_resize_pre(b, uint(thread_count))
  93. }
  94. b.index = 0
  95. b.generation_id = 0
  96. b.thread_count = thread_count
  97. }
// Block the current thread until all threads have rendezvoused
// Barrier can be reused after all threads rendezvoused once, and can be used continuously
//
// Returns true on exactly one thread per rendezvous (the "leader").
barrier_wait :: proc "contextless" (b: ^Barrier) -> (is_leader: bool) {
	when ODIN_VALGRIND_SUPPORT {
		vg.helgrind_barrier_wait_pre(b)
	}
	guard(&b.mutex)
	local_gen := b.generation_id
	b.index += 1
	if b.index < b.thread_count {
		// Wait for the current generation to complete. The generation-id
		// check guards against spurious wake-ups and against waking into
		// the NEXT rendezvous after the barrier is reused.
		for local_gen == b.generation_id && b.index < b.thread_count {
			cond_wait(&b.cond, &b.mutex)
		}
		return false
	}

	// Last thread to arrive: reset for the next generation and release everyone.
	b.index = 0
	b.generation_id += 1
	cond_broadcast(&b.cond)
	return true
}
// An Auto_Reset_Event releases at most one waiting thread per signal and
// then automatically returns to the reset state.
Auto_Reset_Event :: struct #no_copy {
	// status ==  0: Event is reset and no threads are waiting
	// status ==  1: Event is signaled
	// status == -N: Event is reset and N threads are waiting
	status: i32,
	sema: Sema, // waiting threads park here while status is negative
}
  125. auto_reset_event_signal :: proc "contextless" (e: ^Auto_Reset_Event) {
  126. old_status := atomic_load_explicit(&e.status, .Relaxed)
  127. for {
  128. new_status := old_status + 1 if old_status < 1 else 1
  129. if _, ok := atomic_compare_exchange_weak_explicit(&e.status, old_status, new_status, .Release, .Relaxed); ok {
  130. break
  131. }
  132. if old_status < 0 {
  133. sema_post(&e.sema)
  134. }
  135. }
  136. }
// auto_reset_event_wait blocks until the event is signaled, consuming the
// signal so the event automatically resets for the next waiter.
auto_reset_event_wait :: proc "contextless" (e: ^Auto_Reset_Event) {
	// atomic_sub returns the previous value: 1 means a pending signal was
	// consumed without blocking; anything lower means we must park.
	old_status := atomic_sub_explicit(&e.status, 1, .Acquire)
	if old_status < 1 {
		sema_wait(&e.sema)
	}
}
// A Ticket_Mutex is a FIFO-fair spinlock: threads acquire it in the exact
// order in which they requested it.
//
// A Ticket_Mutex must not be copied after first use.
Ticket_Mutex :: struct #no_copy {
	ticket: uint,  // next ticket number to hand out
	serving: uint, // ticket number currently allowed to hold the lock
}
  147. ticket_mutex_lock :: #force_inline proc "contextless" (m: ^Ticket_Mutex) {
  148. ticket := atomic_add_explicit(&m.ticket, 1, .Relaxed)
  149. for ticket != atomic_load_explicit(&m.serving, .Acquire) {
  150. cpu_relax()
  151. }
  152. }
  153. ticket_mutex_unlock :: #force_inline proc "contextless" (m: ^Ticket_Mutex) {
  154. atomic_add_explicit(&m.serving, 1, .Relaxed)
  155. }
// ticket_mutex_guard acquires the mutex and, via @(deferred_in), releases
// it automatically at the end of the caller's scope. Always returns true
// so it can be used directly as `if ticket_mutex_guard(&m) { ... }`.
@(deferred_in=ticket_mutex_unlock)
ticket_mutex_guard :: proc "contextless" (m: ^Ticket_Mutex) -> bool {
	ticket_mutex_lock(m)
	return true
}
// A Benaphore is a mutex that avoids touching the semaphore (and therefore
// the kernel) entirely in the uncontended case.
//
// A Benaphore must not be copied after first use.
Benaphore :: struct #no_copy {
	counter: i32, // 0 = free, 1 = held, >1 = held with queued threads
	sema: Sema,   // contended threads block here
}
  165. benaphore_lock :: proc "contextless" (b: ^Benaphore) {
  166. if atomic_add_explicit(&b.counter, 1, .Acquire) > 1 {
  167. sema_wait(&b.sema)
  168. }
  169. }
  170. benaphore_try_lock :: proc "contextless" (b: ^Benaphore) -> bool {
  171. v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 0, 1, .Acquire, .Acquire)
  172. return v == 0
  173. }
  174. benaphore_unlock :: proc "contextless" (b: ^Benaphore) {
  175. if atomic_sub_explicit(&b.counter, 1, .Release) > 0 {
  176. sema_post(&b.sema)
  177. }
  178. }
// benaphore_guard acquires the lock and, via @(deferred_in), releases it
// automatically at the end of the caller's scope. Always returns true so
// it can be used directly as `if benaphore_guard(&b) { ... }`.
@(deferred_in=benaphore_unlock)
benaphore_guard :: proc "contextless" (m: ^Benaphore) -> bool {
	benaphore_lock(m)
	return true
}
// A Recursive_Benaphore is a benaphore that the owning thread may acquire
// multiple times; it must be unlocked an equal number of times.
//
// A Recursive_Benaphore must not be copied after first use.
Recursive_Benaphore :: struct #no_copy {
	counter: int,   // total outstanding acquisitions across all threads
	owner: int,     // thread id of the current owner (0 = none)
	recursion: i32, // how many times the owner has acquired the lock
	sema: Sema,     // non-owner threads block here
}
  190. recursive_benaphore_lock :: proc "contextless" (b: ^Recursive_Benaphore) {
  191. tid := current_thread_id()
  192. if atomic_add_explicit(&b.counter, 1, .Acquire) > 1 {
  193. if tid != b.owner {
  194. sema_wait(&b.sema)
  195. }
  196. }
  197. // inside the lock
  198. b.owner = tid
  199. b.recursion += 1
  200. }
  201. recursive_benaphore_try_lock :: proc "contextless" (b: ^Recursive_Benaphore) -> bool {
  202. tid := current_thread_id()
  203. if b.owner == tid {
  204. atomic_add_explicit(&b.counter, 1, .Acquire)
  205. }
  206. if v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 0, 1, .Acquire, .Acquire); v != 0 {
  207. return false
  208. }
  209. // inside the lock
  210. b.owner = tid
  211. b.recursion += 1
  212. return true
  213. }
  214. recursive_benaphore_unlock :: proc "contextless" (b: ^Recursive_Benaphore) {
  215. tid := current_thread_id()
  216. _assert(tid == b.owner, "tid != b.owner")
  217. b.recursion -= 1
  218. recursion := b.recursion
  219. if recursion == 0 {
  220. b.owner = 0
  221. }
  222. if atomic_sub_explicit(&b.counter, 1, .Release) > 0 {
  223. if recursion == 0 {
  224. sema_post(&b.sema)
  225. }
  226. }
  227. // outside the lock
  228. }
// recursive_benaphore_guard acquires the lock and, via @(deferred_in),
// releases it automatically at the end of the caller's scope. Always
// returns true so it can be used directly in an `if` statement.
@(deferred_in=recursive_benaphore_unlock)
recursive_benaphore_guard :: proc "contextless" (m: ^Recursive_Benaphore) -> bool {
	recursive_benaphore_lock(m)
	return true
}
// Once is a data value that will perform exactly one action.
//
// A Once must not be copied after first use.
Once :: struct #no_copy {
	m: Mutex,   // serializes the slow path of the once_do_* procedures
	done: bool, // set (with release ordering) after the action has run
}
// once_do calls the procedure fn if and only if once_do is being called for the first time for this instance of Once.
once_do :: proc{
	once_do_without_data,
	once_do_without_data_contextless,
	once_do_with_data,
	once_do_with_data_contextless,
}
// once_do_without_data calls the procedure fn if and only if once_do_without_data is being called for the first time for this instance of Once.
once_do_without_data :: proc(o: ^Once, fn: proc()) {
	// Slow path: take the mutex, re-check under it, run fn once, then
	// publish `done` with release ordering.
	@(cold)
	do_slow :: proc(o: ^Once, fn: proc()) {
		guard(&o.m)
		if !o.done {
			fn()
			atomic_store_explicit(&o.done, true, .Release)
		}
	}

	// Fast path: a single acquire load once the action has completed.
	if atomic_load_explicit(&o.done, .Acquire) == false {
		do_slow(o, fn)
	}
}
  262. // once_do_without_data calls the procedure fn if and only if once_do_without_data is being called for the first for this instance of Once.
  263. once_do_without_data_contextless :: proc(o: ^Once, fn: proc "contextless" ()) {
  264. @(cold)
  265. do_slow :: proc(o: ^Once, fn: proc "contextless" ()) {
  266. guard(&o.m)
  267. if !o.done {
  268. fn()
  269. atomic_store_explicit(&o.done, true, .Release)
  270. }
  271. }
  272. if atomic_load_explicit(&o.done, .Acquire) == false {
  273. do_slow(o, fn)
  274. }
  275. }
// once_do_with_data calls the procedure fn if and only if once_do_with_data is being called for the first time for this instance of Once.
once_do_with_data :: proc(o: ^Once, fn: proc(data: rawptr), data: rawptr) {
	// Slow path: take the mutex, re-check under it, run fn once with the
	// caller-supplied data pointer, then publish `done`.
	@(cold)
	do_slow :: proc(o: ^Once, fn: proc(data: rawptr), data: rawptr) {
		guard(&o.m)
		if !o.done {
			fn(data)
			atomic_store_explicit(&o.done, true, .Release)
		}
	}

	// Fast path: a single acquire load once the action has completed.
	if atomic_load_explicit(&o.done, .Acquire) == false {
		do_slow(o, fn, data)
	}
}
// once_do_with_data_contextless calls the procedure fn if and only if once_do_with_data_contextless is being called for the first time for this instance of Once.
once_do_with_data_contextless :: proc "contextless" (o: ^Once, fn: proc "contextless" (data: rawptr), data: rawptr) {
	// Slow path: take the mutex, re-check under it, run fn once with the
	// caller-supplied data pointer, then publish `done`.
	@(cold)
	do_slow :: proc "contextless" (o: ^Once, fn: proc "contextless" (data: rawptr), data: rawptr) {
		guard(&o.m)
		if !o.done {
			fn(data)
			atomic_store_explicit(&o.done, true, .Release)
		}
	}

	// Fast path: a single acquire load once the action has completed.
	if atomic_load_explicit(&o.done, .Acquire) == false {
		do_slow(o, fn, data)
	}
}
// A Parker is an associated token which is initially not present:
// * The `park` procedure blocks the current thread unless or until the token
//   is available, at which point the token is consumed.
// * The `park_with_timeout` procedure works the same as `park` but only
//   blocks for the specified duration.
// * The `unpark` procedure atomically makes the token available if it
//   was not already.
Parker :: struct #no_copy {
	state: Futex, // EMPTY (0), NOTIFIED (1), or PARKED (max value)
}
// Blocks the current thread until the token is made available.
//
// Assumes this is only called by the thread that owns the Parker.
park :: proc "contextless" (p: ^Parker) {
	EMPTY    :: 0
	NOTIFIED :: 1
	PARKED   :: max(u32)
	// Decrement consumes a pending notification (NOTIFIED -> EMPTY) without
	// blocking; otherwise it moves EMPTY -> PARKED (wrap-around).
	if atomic_sub_explicit(&p.state, 1, .Acquire) == NOTIFIED {
		return
	}
	for {
		futex_wait(&p.state, PARKED)
		// Only return once NOTIFIED has actually been observed and consumed;
		// the loop absorbs spurious futex wake-ups.
		if _, ok := atomic_compare_exchange_strong_explicit(&p.state, NOTIFIED, EMPTY, .Acquire, .Acquire); ok {
			return
		}
	}
}
// Blocks the current thread until the token is made available, but only
// for a limited duration.
//
// Assumes this is only called by the thread that owns the Parker
park_with_timeout :: proc "contextless" (p: ^Parker, duration: time.Duration) {
	EMPTY    :: 0
	NOTIFIED :: 1
	PARKED   :: max(u32)
	// Decrement consumes a pending notification (NOTIFIED -> EMPTY) without
	// blocking; otherwise it moves EMPTY -> PARKED (wrap-around).
	if atomic_sub_explicit(&p.state, 1, .Acquire) == NOTIFIED {
		return
	}
	futex_wait_with_timeout(&p.state, PARKED, duration)
	// Whether notified, timed out, or woken spuriously, leave the state
	// EMPTY so the Parker is ready for the next park/unpark cycle.
	atomic_exchange_explicit(&p.state, EMPTY, .Acquire)
}
// Atomically makes the token available if it was not already.
unpark :: proc "contextless" (p: ^Parker) {
	EMPTY    :: 0
	NOTIFIED :: 1
	PARKED   :: max(Futex)
	// A futex wake-up is only needed when the owner is actually parked;
	// otherwise setting NOTIFIED is sufficient for park to consume.
	if atomic_exchange_explicit(&p.state, NOTIFIED, .Release) == PARKED {
		futex_signal(&p.state)
	}
}
// A One_Shot_Event is an associated token which is initially not present:
// * The `one_shot_event_wait` blocks the current thread until the event
//   is made available
// * The `one_shot_event_signal` procedure atomically makes the token
//   available if it was not already.
One_Shot_Event :: struct #no_copy {
	state: Futex, // 0 = not signaled, 1 = signaled; never resets
}
// Blocks the current thread until the event is made available with `one_shot_event_signal`.
one_shot_event_wait :: proc "contextless" (e: ^One_Shot_Event) {
	// Loop absorbs spurious futex wake-ups: only return once the state has
	// actually been set to non-zero by a signal.
	for atomic_load_explicit(&e.state, .Acquire) == 0 {
		futex_wait(&e.state, 0)
	}
}
// Releases any threads that are currently blocked by this event with `one_shot_event_wait`.
one_shot_event_signal :: proc "contextless" (e: ^One_Shot_Event) {
	atomic_store_explicit(&e.state, 1, .Release)
	// Wake every waiter; the state never resets, so threads arriving later
	// return immediately from the load in one_shot_event_wait.
	futex_broadcast(&e.state)
}