//+private
package sync2
  3. when #config(ODIN_SYNC_RECURSIVE_MUTEX_USE_FUTEX, true) {
// Futex-based recursive mutex.
// `owner` holds the owning thread's id (0 => unlocked) and doubles as the
// futex word that contended threads sleep on.
_Recursive_Mutex :: struct {
	owner:     Futex, // owning thread id; 0 when unlocked
	recursion: i32,   // lock depth of the owning thread
}
  8. _recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
  9. tid := Futex(current_thread_id())
  10. for {
  11. prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0)
  12. switch prev_owner {
  13. case 0, tid:
  14. m.impl.recursion += 1
  15. // inside the lock
  16. return
  17. }
  18. futex_wait(&m.impl.owner, u32(prev_owner))
  19. }
  20. }
  21. _recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
  22. m.impl.recursion -= 1
  23. if m.impl.recursion != 0 {
  24. return
  25. }
  26. atomic_exchange_release(&m.impl.owner, 0)
  27. futex_signal(&m.impl.owner)
  28. // outside the lock
  29. }
  30. _recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
  31. tid := Futex(current_thread_id())
  32. prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0)
  33. switch prev_owner {
  34. case 0, tid:
  35. m.impl.recursion += 1
  36. // inside the lock
  37. return true
  38. }
  39. return false
  40. }
  41. } else {
// Fallback recursive mutex built on top of a plain (non-recursive) Mutex;
// the owner check in lock/unlock prevents re-locking the inner mutex.
_Recursive_Mutex :: struct {
	owner:     int,   // owning thread id; 0 when unlocked
	recursion: int,   // lock depth of the owning thread
	mutex:     Mutex, // underlying non-recursive mutex
}
  47. _recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
  48. tid := current_thread_id()
  49. if tid != m.impl.owner {
  50. mutex_lock(&m.impl.mutex)
  51. }
  52. // inside the lock
  53. m.impl.owner = tid
  54. m.impl.recursion += 1
  55. }
  56. _recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
  57. tid := current_thread_id()
  58. assert(tid == m.impl.owner)
  59. m.impl.recursion -= 1
  60. recursion := m.impl.recursion
  61. if recursion == 0 {
  62. m.impl.owner = 0
  63. }
  64. if recursion == 0 {
  65. mutex_unlock(&m.impl.mutex)
  66. }
  67. // outside the lock
  68. }
  69. _recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
  70. tid := current_thread_id()
  71. if m.impl.owner == tid {
  72. return mutex_try_lock(&m.impl.mutex)
  73. }
  74. if !mutex_try_lock(&m.impl.mutex) {
  75. return false
  76. }
  77. // inside the lock
  78. m.impl.owner = tid
  79. m.impl.recursion += 1
  80. return true
  81. }
  82. }
  83. when ODIN_OS != "windows" {
// State word layout for the fallback RW mutex (one uint, split in half):
//   bit 0              — Is_Writing: a writer currently holds the lock
//   bits 1..half-1     — writer count (writers holding or waiting)
//   bits half..top     — reader count
RW_Mutex_State :: distinct uint
RW_Mutex_State_Half_Width :: size_of(RW_Mutex_State)*8/2
RW_Mutex_State_Is_Writing :: RW_Mutex_State(1)
RW_Mutex_State_Writer     :: RW_Mutex_State(1)<<1
RW_Mutex_State_Reader     :: RW_Mutex_State(1)<<RW_Mutex_State_Half_Width
RW_Mutex_State_Writer_Mask :: RW_Mutex_State(1<<(RW_Mutex_State_Half_Width-1) - 1) << 1
RW_Mutex_State_Reader_Mask :: RW_Mutex_State(1<<(RW_Mutex_State_Half_Width-1) - 1) << RW_Mutex_State_Half_Width
// Fallback reader-writer mutex for non-Windows targets.
_RW_Mutex :: struct {
	// NOTE(bill): pthread_rwlock_t cannot be used since pthread_rwlock_destroy is required on some platforms
	// TODO(bill): Can we determine which platforms exactly?
	state: RW_Mutex_State, // packed reader/writer state (see RW_Mutex_State_* constants)
	mutex: Mutex,          // serializes writers and slow-path readers
	sema:  Sema,           // a writer waits here for in-flight readers to drain
}
  98. _rw_mutex_lock :: proc(rw: ^RW_Mutex) {
  99. _ = atomic_add(&rw.impl.state, RW_Mutex_State_Writer)
  100. mutex_lock(&rw.impl.mutex)
  101. state := atomic_or(&rw.impl.state, RW_Mutex_State_Writer)
  102. if state & RW_Mutex_State_Reader_Mask != 0 {
  103. sema_wait(&rw.impl.sema)
  104. }
  105. }
// Releases exclusive access: clears the Is_Writing flag and releases the
// writer mutex.
// NOTE(review): the Writer count added in _rw_mutex_lock is never
// subtracted here, so once any writer has locked, readers permanently use
// the slow (mutex) path — confirm whether this is intentional.
_rw_mutex_unlock :: proc(rw: ^RW_Mutex) {
	_ = atomic_and(&rw.impl.state, ~RW_Mutex_State_Is_Writing)
	mutex_unlock(&rw.impl.mutex)
}
// Attempts to acquire `rw` for exclusive access without blocking.
// Returns true on success.
_rw_mutex_try_lock :: proc(rw: ^RW_Mutex) -> bool {
	if mutex_try_lock(&rw.impl.mutex) {
		state := atomic_load(&rw.impl.state)
		if state & RW_Mutex_State_Reader_Mask == 0 {
			// No readers observed: claim write ownership.
			// NOTE(review): a fast-path reader could slip in between the
			// load above and this or — confirm this window is acceptable.
			_ = atomic_or(&rw.impl.state, RW_Mutex_State_Is_Writing)
			return true
		}
		// Readers are active; back out of the mutex.
		mutex_unlock(&rw.impl.mutex)
	}
	return false
}
// Acquires `rw` for shared (read) access.
// Fast path: while no writer is active or pending, CAS the reader count up.
// Slow path: queue behind writers on the mutex, then register as a reader.
_rw_mutex_shared_lock :: proc(rw: ^RW_Mutex) {
	state := atomic_load(&rw.impl.state)
	for state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
		ok: bool
		// On failure the CAS yields the freshly observed state, which the
		// loop condition re-checks before retrying.
		state, ok = atomic_compare_exchange_weak(&rw.impl.state, state, state + RW_Mutex_State_Reader)
		if ok {
			return
		}
	}
	// A writer is active or pending: wait our turn on the mutex.
	mutex_lock(&rw.impl.mutex)
	_ = atomic_add(&rw.impl.state, RW_Mutex_State_Reader)
	mutex_unlock(&rw.impl.mutex)
}
// Releases shared (read) access. If this was the last reader out and a
// writer is waiting (Is_Writing set), wake it via the semaphore.
_rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
	// `state` is the value before the subtraction, so a reader field equal
	// to exactly one Reader means we were the last reader.
	state := atomic_sub(&rw.impl.state, RW_Mutex_State_Reader)
	if (state & RW_Mutex_State_Reader_Mask == RW_Mutex_State_Reader) &&
	(state & RW_Mutex_State_Is_Writing != 0) {
		sema_post(&rw.impl.sema)
	}
}
// Attempts to acquire `rw` for shared (read) access without blocking.
// Returns true on success.
_rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
	state := atomic_load(&rw.impl.state)
	if state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
		// No writer active or pending: one attempt to bump the reader count.
		_, ok := atomic_compare_exchange_strong(&rw.impl.state, state, state + RW_Mutex_State_Reader)
		if ok {
			return true
		}
	}
	// Fast path unavailable: fall back to a non-blocking grab of the mutex.
	if mutex_try_lock(&rw.impl.mutex) {
		_ = atomic_add(&rw.impl.state, RW_Mutex_State_Reader)
		mutex_unlock(&rw.impl.mutex)
		return true
	}
	return false
}
  156. }