// sync.odin — spin-loop and semaphore-based synchronization primitives.
package sync

import "core:intrinsics"
// Spin-wait hint: tells the CPU we are busy-waiting so it can reduce power
// and pipeline pressure. Thin wrapper over the compiler intrinsic
// (presumably lowers to PAUSE on x86 — per-target lowering is up to the
// compiler; confirm in core:intrinsics).
cpu_relax :: #force_inline proc "contextless" () {
	intrinsics.cpu_relax();
}
  6. Condition_Mutex_Ptr :: union{^Mutex, ^Blocking_Mutex};
// FIFO spin lock: each locker takes a ticket and spins until it is served,
// guaranteeing first-come-first-served fairness.
Ticket_Mutex :: struct {
	ticket:  u64, // next ticket number to hand out
	serving: u64, // ticket number currently allowed to hold the lock
}
  11. ticket_mutex_init :: proc(m: ^Ticket_Mutex) {
  12. atomic_store(&m.ticket, 0, .Relaxed);
  13. atomic_store(&m.serving, 0, .Relaxed);
  14. }
  15. ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
  16. ticket := atomic_add(&m.ticket, 1, .Relaxed);
  17. for ticket != atomic_load(&m.serving, .Acquire) {
  18. intrinsics.cpu_relax();
  19. }
  20. }
  21. ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
  22. atomic_add(&m.serving, 1, .Relaxed);
  23. }
// Benaphore: a mutex built from an atomic counter plus a semaphore. The
// counter makes the uncontended path a single atomic op; the semaphore is
// only touched under contention.
Benaphore :: struct {
	counter: int,       // number of threads holding or waiting for the lock
	sema:    Semaphore, // parks contended waiters
}
  28. benaphore_init :: proc(b: ^Benaphore) {
  29. intrinsics.atomic_store(&b.counter, 0);
  30. semaphore_init(&b.sema);
  31. }
// Releases the benaphore's semaphore resources. The caller must ensure no
// thread still holds or is waiting on the lock.
benaphore_destroy :: proc(b: ^Benaphore) {
	semaphore_destroy(&b.sema);
}
// Acquires the benaphore. The uncontended path is a single atomic add; only
// a contended caller falls through to the kernel semaphore.
benaphore_lock :: proc(b: ^Benaphore) {
	// NOTE(review): the `> 1` test assumes atomic_add_acq yields the *new*
	// counter value (new > 1 => another thread already held the lock). If the
	// intrinsic returns the *old* value, the threshold should be `> 0` —
	// verify against core:intrinsics before touching this.
	if intrinsics.atomic_add_acq(&b.counter, 1) > 1 {
		semaphore_wait_for(&b.sema);
	}
}
// Attempts to acquire the benaphore without blocking.
// Returns true iff the lock was taken.
benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
	// Compare-exchange on the counter; `v` is the value observed before the
	// exchange, so v == 0 means the lock was free and we now own it.
	// NOTE(review): argument order (new vs. expected) is per core:intrinsics'
	// atomic_cxchg signature — confirm it exchanges 0 -> 1.
	v, _ := intrinsics.atomic_cxchg_acq(&b.counter, 1, 0);
	return v == 0;
}
// Releases the benaphore. Release ordering on the decrement publishes the
// critical section to the next owner (pairs with the acquire in
// benaphore_lock / benaphore_try_lock).
benaphore_unlock :: proc(b: ^Benaphore) {
	// NOTE(review): `> 0` assumes atomic_sub_rel yields the *new* value
	// (new > 0 => at least one waiter remains to be woken). With old-value
	// semantics this would post on every unlock — verify against
	// core:intrinsics, consistently with the check in benaphore_lock.
	if intrinsics.atomic_sub_rel(&b.counter, 1) > 0 {
		semaphore_post(&b.sema);
	}
}
// Re-entrant benaphore: the owning thread may lock it repeatedly; it is
// released for other threads only when the recursion depth returns to zero.
Recursive_Benaphore :: struct {
	counter:   int,       // total outstanding lock calls (all threads)
	owner:     int,       // thread id of the current owner; 0 means unowned
	recursion: int,       // how many times the owner currently holds the lock
	sema:      Semaphore, // parks contended waiters
}
  55. recursive_benaphore_init :: proc(b: ^Recursive_Benaphore) {
  56. intrinsics.atomic_store(&b.counter, 0);
  57. semaphore_init(&b.sema);
  58. }
// Releases the semaphore resources. The caller must ensure the lock is no
// longer held or waited on by any thread.
recursive_benaphore_destroy :: proc(b: ^Recursive_Benaphore) {
	semaphore_destroy(&b.sema);
}
// Acquires the lock, allowing re-entry by the current owner. Every call —
// including recursive ones — bumps `counter`; each must be matched by a call
// to recursive_benaphore_unlock.
recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
	tid := current_thread_id();
	// NOTE(review): as in benaphore_lock, `> 1` assumes the intrinsic returns
	// the post-add value — verify against core:intrinsics.
	if intrinsics.atomic_add_acq(&b.counter, 1) > 1 {
		// Contended: only block if some *other* thread owns the lock; the
		// owner itself is re-entering and must not wait on its own release.
		if tid != b.owner {
			semaphore_wait_for(&b.sema);
		}
	}
	// inside the lock
	b.owner = tid;
	b.recursion += 1;
}
// Attempts to acquire the lock without blocking.
// Returns true iff the lock was taken (always true for the current owner,
// which simply increases its recursion depth).
recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
	tid := current_thread_id();
	if b.owner == tid {
		// Re-entrant path: we already hold the lock; just record another hold.
		intrinsics.atomic_add_acq(&b.counter, 1);
	} else {
		// Not the owner: try to take the free lock via compare-exchange
		// (counter 0 -> 1); `v` is the pre-exchange value, so any non-zero
		// value means another thread holds it.
		v, _ := intrinsics.atomic_cxchg_acq(&b.counter, 1, 0);
		if v != 0 {
			return false;
		}
		// inside the lock
		b.owner = tid;
	}
	b.recursion += 1;
	return true;
}
// Releases one level of the lock. Only the owning thread may call this
// (asserted); the lock becomes available to other threads only when the
// recursion depth drops back to zero.
recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
	tid := current_thread_id();
	assert(tid == b.owner);
	b.recursion -= 1;
	recursion := b.recursion;
	// Clear ownership *before* the releasing decrement below, while we still
	// hold the lock; 0 is the "no owner" sentinel.
	if recursion == 0 {
		b.owner = 0;
	}
	// NOTE(review): `> 0` assumes atomic_sub_rel yields the post-sub value
	// (waiters remain) — same caveat as benaphore_unlock; verify against
	// core:intrinsics.
	if intrinsics.atomic_sub_rel(&b.counter, 1) > 0 {
		// Wake a waiter only on the final (non-recursive) release.
		if recursion == 0 {
			semaphore_post(&b.sema);
		}
	}
	// outside the lock
}