// sync_windows.odin — Windows synchronization primitives (Semaphore, Mutex)
// built on the Win32 API (semaphore handles and CRITICAL_SECTION).
  1. import (
  2. win32 "sys/windows.odin" when ODIN_OS == "windows";
  3. "atomics.odin";
  4. )
// Semaphore wraps a Win32 semaphore handle.
// Create with semaphore_init; release with semaphore_destroy.
Semaphore :: struct {
	_handle: win32.Handle; // owned Win32 handle, closed by semaphore_destroy
}
  8. /*
  9. Mutex :: struct {
  10. _semaphore: Semaphore;
  11. _counter: i32;
  12. _owner: i32;
  13. _recursion: i32;
  14. }
  15. */
// Mutex is a mutual-exclusion lock backed by a Win32 CRITICAL_SECTION.
// Initialize with mutex_init; free its resources with mutex_destroy.
Mutex :: struct {
	_critical_section: win32.CriticalSection;
}
  19. current_thread_id :: proc() -> i32 {
  20. return i32(win32.get_current_thread_id());
  21. }
  22. semaphore_init :: proc(s: ^Semaphore) {
  23. s._handle = win32.create_semaphore_a(nil, 0, 1<<31-1, nil);
  24. }
  25. semaphore_destroy :: proc(s: ^Semaphore) {
  26. win32.close_handle(s._handle);
  27. }
  28. semaphore_post :: proc(s: ^Semaphore, count: int) {
  29. win32.release_semaphore(s._handle, i32(count), nil);
  30. }
  31. semaphore_release :: proc(s: ^Semaphore) #inline { semaphore_post(s, 1); }
  32. semaphore_wait :: proc(s: ^Semaphore) {
  33. win32.wait_for_single_object(s._handle, win32.INFINITE);
  34. }
  35. mutex_init :: proc(m: ^Mutex, spin_count := 0) {
  36. win32.initialize_critical_section_and_spin_count(&m._critical_section, u32(spin_count));
  37. }
  38. mutex_destroy :: proc(m: ^Mutex) {
  39. win32.delete_critical_section(&m._critical_section);
  40. }
  41. mutex_lock :: proc(m: ^Mutex) {
  42. win32.enter_critical_section(&m._critical_section);
  43. }
  44. mutex_try_lock :: proc(m: ^Mutex) -> bool {
  45. return win32.try_enter_critical_section(&m._critical_section) != 0;
  46. }
  47. mutex_unlock :: proc(m: ^Mutex) {
  48. win32.leave_critical_section(&m._critical_section);
  49. }
/*
Alternative semaphore-based ("benaphore") Mutex implementation, kept
commented out for reference; superseded by the CriticalSection version above.
  51. mutex_init :: proc(m: ^Mutex) {
  52. atomics.store(&m._counter, 0);
  53. atomics.store(&m._owner, current_thread_id());
  54. semaphore_init(&m._semaphore);
  55. m._recursion = 0;
  56. }
  57. mutex_destroy :: proc(m: ^Mutex) {
  58. semaphore_destroy(&m._semaphore);
  59. }
  60. mutex_lock :: proc(m: ^Mutex) {
  61. thread_id := current_thread_id();
  62. if atomics.fetch_add(&m._counter, 1) > 0 {
  63. if thread_id != atomics.load(&m._owner) {
  64. semaphore_wait(&m._semaphore);
  65. }
  66. }
  67. atomics.store(&m._owner, thread_id);
  68. m._recursion++;
  69. }
  70. mutex_try_lock :: proc(m: ^Mutex) -> bool {
  71. thread_id := current_thread_id();
  72. if atomics.load(&m._owner) == thread_id {
  73. atomics.fetch_add(&m._counter, 1);
  74. } else {
  75. expected: i32 = 0;
  76. if atomics.load(&m._counter) != 0 {
  77. return false;
  78. }
  79. if atomics.compare_exchange(&m._counter, expected, 1) == 0 {
  80. return false;
  81. }
  82. atomics.store(&m._owner, thread_id);
  83. }
  84. m._recursion++;
  85. return true;
  86. }
  87. mutex_unlock :: proc(m: ^Mutex) {
  88. recursion: i32;
  89. thread_id := current_thread_id();
  90. assert(thread_id == atomics.load(&m._owner));
  91. m._recursion--;
  92. recursion = m._recursion;
  93. if recursion == 0 {
  94. atomics.store(&m._owner, thread_id);
  95. }
  96. if atomics.fetch_add(&m._counter, -1) > 1 {
  97. if recursion == 0 {
  98. semaphore_release(&m._semaphore);
  99. }
  100. }
  101. }
  102. */