// sync_windows.odin — Win32-backed synchronization primitives (Semaphore, Mutex).
  1. when ODIN_OS == "windows" {
  2. import win32 "core:sys/windows.odin";
  3. }
  4. import "core:atomics.odin"
// Counting semaphore backed by a Win32 semaphore object.
// Create with semaphore_init, release the OS handle with semaphore_destroy.
Semaphore :: struct {
	_handle: win32.Handle, // Win32 semaphore handle; valid between semaphore_init and semaphore_destroy
}
  8. /*
  9. Mutex :: struct {
  10. _semaphore: Semaphore,
  11. _counter: i32,
  12. _owner: i32,
  13. _recursion: i32,
  14. }
  15. */
// Mutual-exclusion lock backed by a Win32 CRITICAL_SECTION
// (the semaphore-based variant above is kept only as a disabled reference).
Mutex :: struct {
	_critical_section: win32.Critical_Section, // initialized by mutex_init, torn down by mutex_destroy
}
  19. current_thread_id :: proc() -> i32 {
  20. return i32(win32.get_current_thread_id());
  21. }
// Initializes s with an unnamed Win32 semaphore: initial count 0,
// maximum count 1<<31 - 1 (largest positive LONG, i.e. effectively unbounded).
// NOTE(review): the returned handle is not checked for failure (nil handle).
semaphore_init :: proc(s: ^Semaphore) {
	s._handle = win32.create_semaphore_a(nil, 0, 1<<31-1, nil);
}
  25. semaphore_destroy :: proc(s: ^Semaphore) {
  26. win32.close_handle(s._handle);
  27. }
// Increases the semaphore count by `count`, allowing up to `count` waiters to proceed.
// NOTE(review): the release_semaphore return value is ignored, so failure is silent.
semaphore_post :: proc(s: ^Semaphore, count: int) {
	win32.release_semaphore(s._handle, i32(count), nil);
}
  31. semaphore_release :: inline proc(s: ^Semaphore) {
  32. semaphore_post(s, 1);
  33. }
// Blocks the calling thread until the semaphore is signaled, then consumes one count.
// Waits forever (win32.INFINITE); there is no timeout variant in this module.
semaphore_wait :: proc(s: ^Semaphore) {
	win32.wait_for_single_object(s._handle, win32.INFINITE);
}
// Initializes m's critical section. spin_count is the number of spin
// iterations a contended lock performs before sleeping (0 = no spinning,
// per the Win32 InitializeCriticalSectionAndSpinCount contract).
mutex_init :: proc(m: ^Mutex, spin_count := 0) {
	win32.initialize_critical_section_and_spin_count(&m._critical_section, u32(spin_count));
}
  40. mutex_destroy :: proc(m: ^Mutex) {
  41. win32.delete_critical_section(&m._critical_section);
  42. }
// Acquires m, blocking until it is available. Win32 critical sections are
// recursive, so a thread that already owns m may lock it again (it must then
// call mutex_unlock once per lock).
mutex_lock :: proc(m: ^Mutex) {
	win32.enter_critical_section(&m._critical_section);
}
  46. mutex_try_lock :: proc(m: ^Mutex) -> bool {
  47. return bool(win32.try_enter_critical_section(&m._critical_section));
  48. }
// Releases one level of ownership of m; must be called by the owning thread,
// once for each successful mutex_lock/mutex_try_lock.
mutex_unlock :: proc(m: ^Mutex) {
	win32.leave_critical_section(&m._critical_section);
}
  52. /*
  53. mutex_init :: proc(m: ^Mutex) {
  54. atomics.store(&m._counter, 0);
  55. atomics.store(&m._owner, current_thread_id());
  56. semaphore_init(&m._semaphore);
  57. m._recursion = 0;
  58. }
  59. mutex_destroy :: proc(m: ^Mutex) {
  60. semaphore_destroy(&m._semaphore);
  61. }
  62. mutex_lock :: proc(m: ^Mutex) {
  63. thread_id := current_thread_id();
  64. if atomics.fetch_add(&m._counter, 1) > 0 {
  65. if thread_id != atomics.load(&m._owner) {
  66. semaphore_wait(&m._semaphore);
  67. }
  68. }
  69. atomics.store(&m._owner, thread_id);
  70. m._recursion++;
  71. }
  72. mutex_try_lock :: proc(m: ^Mutex) -> bool {
  73. thread_id := current_thread_id();
  74. if atomics.load(&m._owner) == thread_id {
  75. atomics.fetch_add(&m._counter, 1);
  76. } else {
  77. expected: i32 = 0;
  78. if atomics.load(&m._counter) != 0 {
  79. return false;
  80. }
  81. if atomics.compare_exchange(&m._counter, expected, 1) == 0 {
  82. return false;
  83. }
  84. atomics.store(&m._owner, thread_id);
  85. }
  86. m._recursion++;
  87. return true;
  88. }
  89. mutex_unlock :: proc(m: ^Mutex) {
  90. recursion: i32;
  91. thread_id := current_thread_id();
  92. assert(thread_id == atomics.load(&m._owner));
  93. m._recursion--;
  94. recursion = m._recursion;
  95. if recursion == 0 {
atomics.store(&m._owner, thread_id); // NOTE(review): re-stores the releasing thread's id even when recursion reaches 0 — likely should clear _owner (store 0) so another thread can pass the owner check
  97. }
  98. if atomics.fetch_add(&m._counter, -1) > 1 {
  99. if recursion == 0 {
  100. semaphore_release(&m._semaphore);
  101. }
  102. }
  103. }
  104. */