// threading.cpp

#if defined(GB_SYSTEM_LINUX)
#include <signal.h>
#endif

#if defined(GB_SYSTEM_WINDOWS)
#pragma warning(push)
#pragma warning(disable: 4505)
#endif

struct BlockingMutex;
struct RecursiveMutex;
struct Semaphore;
struct Condition;
struct Thread;
struct ThreadPool;

#define THREAD_PROC(name) isize name(struct Thread *thread)
gb_internal THREAD_PROC(thread_pool_thread_proc);

#define WORKER_TASK_PROC(name) isize name(void *data)
typedef WORKER_TASK_PROC(WorkerTaskProc);

typedef struct WorkerTask {
	WorkerTaskProc *do_work;
	void           *data;
} WorkerTask;

struct Thread {
#if defined(GB_SYSTEM_WINDOWS)
	void *win32_handle;
#else
	pthread_t posix_handle;
#endif

	isize idx;

	WorkerTask *queue;
	size_t capacity; // must be a power of 2, so indices can wrap with a mask
	std::atomic<uint64_t> head_and_tail; // presumably packs the queue's head and tail indices into one atomic word

	isize stack_size;
	struct ThreadPool *pool;
};

typedef std::atomic<i32> Futex;
typedef volatile i32     Footex;

gb_internal void futex_wait(Futex *addr, Footex val);
gb_internal void futex_signal(Futex *addr);
gb_internal void futex_broadcast(Futex *addr);

gb_internal void mutex_lock    (BlockingMutex *m);
gb_internal bool mutex_try_lock(BlockingMutex *m);
gb_internal void mutex_unlock  (BlockingMutex *m);

gb_internal void mutex_lock    (RecursiveMutex *m);
gb_internal bool mutex_try_lock(RecursiveMutex *m);
gb_internal void mutex_unlock  (RecursiveMutex *m);

gb_internal void semaphore_post(Semaphore *s, i32 count);
gb_internal void semaphore_wait(Semaphore *s);
gb_internal void semaphore_release(Semaphore *s) { semaphore_post(s, 1); }

gb_internal void condition_broadcast(Condition *c);
gb_internal void condition_signal(Condition *c);
gb_internal void condition_wait(Condition *c, BlockingMutex *m);

gb_internal u32 thread_current_id(void);

gb_internal void thread_init            (ThreadPool *pool, Thread *t, isize idx);
gb_internal void thread_init_and_start  (ThreadPool *pool, Thread *t, isize idx);
gb_internal void thread_join_and_destroy(Thread *t);
gb_internal void thread_set_name        (Thread *t, char const *name);

gb_internal void yield_thread(void);
gb_internal void yield_process(void);

struct MutexGuard {
	MutexGuard()                   = delete;
	MutexGuard(MutexGuard const &) = delete;

	MutexGuard(BlockingMutex *bm) : bm{bm} {
		mutex_lock(this->bm);
	}
	MutexGuard(RecursiveMutex *rm) : rm{rm} {
		mutex_lock(this->rm);
	}
	MutexGuard(BlockingMutex &bm) : bm{&bm} {
		mutex_lock(this->bm);
	}
	MutexGuard(RecursiveMutex &rm) : rm{&rm} {
		mutex_lock(this->rm);
	}
	~MutexGuard() {
		if (this->bm) {
			mutex_unlock(this->bm);
		} else if (this->rm) {
			mutex_unlock(this->rm);
		}
	}

	operator bool() const { return true; }

	// Both default to null: each constructor sets only one of them, and the
	// destructor inspects both, so leaving the other uninitialized would be UB.
	BlockingMutex  *bm = nullptr;
	RecursiveMutex *rm = nullptr;
};

#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_){m})
#define MUTEX_GUARD(m)       MutexGuard GB_DEFER_3(_mutex_guard_){m}
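
// NOTE (editorial): usage sketch for the guard macros; `m` is a hypothetical
// mutex, not something defined in this file. MUTEX_GUARD_BLOCK scopes the lock
// to the following block, MUTEX_GUARD holds it until the end of the enclosing
// scope:
//
//     BlockingMutex m = {};
//     MUTEX_GUARD_BLOCK(&m) {
//         // locked here; released when the block is left
//     }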

struct RecursiveMutex {
	Futex owner;
	i32   recursion;
};

gb_internal void mutex_lock(RecursiveMutex *m) {
	Futex tid = cast(i32)thread_current_id();
	for (;;) {
		i32 prev_owner = 0;
		m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire);
		if (prev_owner == 0 || prev_owner == tid) {
			m->recursion++;
			// inside the lock
			return;
		}
		futex_wait(&m->owner, prev_owner);
	}
}
gb_internal bool mutex_try_lock(RecursiveMutex *m) {
	Futex tid = cast(i32)thread_current_id();
	i32 prev_owner = 0;
	m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire);
	if (prev_owner == 0 || prev_owner == tid) {
		m->recursion++;
		// inside the lock
		return true;
	}
	return false;
}
gb_internal void mutex_unlock(RecursiveMutex *m) {
	m->recursion--;
	if (m->recursion != 0) {
		return;
	}
	m->owner.exchange(0, std::memory_order_release);
	futex_signal(&m->owner);
	// outside the lock
}
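
// NOTE (editorial): the futex word doubles as the owner's thread id, so
// contended lockers sleep until the word changes; `recursion` counts nested
// acquisitions by the owning thread, and only the outermost unlock clears the
// owner and wakes one waiter.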

struct Semaphore {
	Futex count;
};

gb_internal void semaphore_post(Semaphore *s, i32 count) {
	s->count.fetch_add(count, std::memory_order_release);
	// Wake according to how many slots were just made available, not the
	// current counter value, which another thread may already have consumed.
	if (count == 1) {
		futex_signal(&s->count);
	} else {
		futex_broadcast(&s->count);
	}
}

gb_internal void semaphore_wait(Semaphore *s) {
	for (;;) {
		i32 original_count = s->count.load(std::memory_order_relaxed);
		while (original_count == 0) {
			futex_wait(&s->count, original_count);
			original_count = s->count.load(std::memory_order_relaxed);
		}

		// Succeeding at the decrement means we own a slot; on failure another
		// waiter raced us, so re-read the count and try again.
		if (s->count.compare_exchange_strong(original_count, original_count-1, std::memory_order_acquire, std::memory_order_acquire)) {
			return;
		}
	}
}
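
// NOTE (editorial): a classic futex-backed counting semaphore; waiters sleep
// only while the counter reads zero, and the CAS loop tolerates both spurious
// futex wakeups and competing waiters.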

#if defined(GB_SYSTEM_WINDOWS)

struct BlockingMutex {
	SRWLOCK srwlock;
};
gb_internal void mutex_lock(BlockingMutex *m) {
	AcquireSRWLockExclusive(&m->srwlock);
}
gb_internal bool mutex_try_lock(BlockingMutex *m) {
	return !!TryAcquireSRWLockExclusive(&m->srwlock);
}
gb_internal void mutex_unlock(BlockingMutex *m) {
	ReleaseSRWLockExclusive(&m->srwlock);
}

struct Condition {
	CONDITION_VARIABLE cond;
};
gb_internal void condition_broadcast(Condition *c) {
	WakeAllConditionVariable(&c->cond);
}
gb_internal void condition_signal(Condition *c) {
	WakeConditionVariable(&c->cond);
}
gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
	SleepConditionVariableSRW(&c->cond, &m->srwlock, INFINITE, 0);
}

#else

enum Internal_Mutex_State : i32 {
	Internal_Mutex_State_Unlocked = 0,
	Internal_Mutex_State_Locked   = 1,
	Internal_Mutex_State_Waiting  = 2,
};

struct BlockingMutex {
	i32 state_;

	Futex &state() {
		return *(Futex *)&this->state_;
	}
	Futex const &state() const {
		return *(Futex const *)&this->state_;
	}
};

gb_no_inline gb_internal void mutex_lock_slow(BlockingMutex *m, i32 curr_state) {
	i32 new_state = curr_state;
	for (i32 spin = 0; spin < 100; spin++) {
		i32 state = Internal_Mutex_State_Unlocked;
		bool ok = m->state().compare_exchange_weak(state, new_state, std::memory_order_acquire, std::memory_order_consume);
		if (ok) {
			return;
		}
		if (state == Internal_Mutex_State_Waiting) {
			break;
		}
		for (i32 i = gb_min(spin+1, 32); i > 0; i--) {
			yield_thread();
		}
	}

	// Set just in case 100 iterations did not do it
	new_state = Internal_Mutex_State_Waiting;

	for (;;) {
		if (m->state().exchange(Internal_Mutex_State_Waiting, std::memory_order_acquire) == Internal_Mutex_State_Unlocked) {
			return;
		}
		futex_wait(&m->state(), new_state);
		yield_thread();
	}
}

gb_internal void mutex_lock(BlockingMutex *m) {
	i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire);
	if (v != Internal_Mutex_State_Unlocked) {
		mutex_lock_slow(m, v);
	}
}

gb_internal bool mutex_try_lock(BlockingMutex *m) {
	// Use a CAS rather than a blind exchange: a failed try_lock must not
	// clobber Internal_Mutex_State_Waiting, or the eventual unlock would
	// skip waking the sleepers.
	i32 expected = Internal_Mutex_State_Unlocked;
	return m->state().compare_exchange_strong(expected, Internal_Mutex_State_Locked, std::memory_order_acquire, std::memory_order_relaxed);
}

gb_no_inline gb_internal void mutex_unlock_slow(BlockingMutex *m) {
	futex_signal(&m->state());
}
gb_internal void mutex_unlock(BlockingMutex *m) {
	i32 v = m->state().exchange(Internal_Mutex_State_Unlocked, std::memory_order_release);
	switch (v) {
	case Internal_Mutex_State_Unlocked:
		GB_PANIC("Unreachable");
		break;
	case Internal_Mutex_State_Locked:
		// Okay
		break;
	case Internal_Mutex_State_Waiting:
		mutex_unlock_slow(m);
		break;
	}
}
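
// NOTE (editorial): this is the standard three-state futex mutex (unlocked /
// locked / locked-with-waiters) described in Ulrich Drepper's "Futexes Are
// Tricky"; unlock only enters the kernel when the state says a waiter may be
// sleeping.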

struct Condition {
	i32 state_;

	Futex &state() {
		return *(Futex *)&this->state_;
	}
	Futex const &state() const {
		return *(Futex const *)&this->state_;
	}
};

gb_internal void condition_broadcast(Condition *c) {
	c->state().fetch_add(1, std::memory_order_release);
	futex_broadcast(&c->state());
}
gb_internal void condition_signal(Condition *c) {
	c->state().fetch_add(1, std::memory_order_release);
	futex_signal(&c->state());
}
gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
	i32 state = c->state().load(std::memory_order_relaxed);
	mutex_unlock(m);
	futex_wait(&c->state(), state);
	mutex_lock(m);
}
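
// NOTE (editorial): the sequence counter makes signals that arrive between the
// unlock and the futex_wait visible, but wakeups can still be spurious, so
// callers should re-check their predicate in a loop (`cond`, `m`, and `ready`
// are hypothetical):
//
//     mutex_lock(&m);
//     while (!ready) {
//         condition_wait(&cond, &m);
//     }
//     mutex_unlock(&m);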

#endif

gb_internal u32 thread_current_id(void) {
	u32 thread_id;
#if defined(GB_SYSTEM_WINDOWS)
	#if defined(GB_ARCH_32_BIT) && defined(GB_CPU_X86)
		thread_id = (cast(u32 *)__readfsdword(24))[9];
	#elif defined(GB_ARCH_64_BIT) && defined(GB_CPU_X86)
		thread_id = (cast(u32 *)__readgsqword(48))[18];
	#else
		thread_id = GetCurrentThreadId();
	#endif
#elif defined(GB_SYSTEM_OSX) && defined(GB_ARCH_64_BIT)
	thread_id = pthread_mach_thread_np(pthread_self());
#elif defined(GB_ARCH_32_BIT) && defined(GB_CPU_X86)
	__asm__("mov %%gs:0x08,%0" : "=r"(thread_id));
#elif defined(GB_ARCH_64_BIT) && defined(GB_CPU_X86)
	__asm__("mov %%fs:0x10,%0" : "=r"(thread_id));
#elif defined(GB_SYSTEM_LINUX)
	thread_id = gettid();
#else
	#error Unsupported architecture for thread_current_id()
#endif
	return thread_id;
}

gb_internal gb_inline void yield_thread(void) {
#if defined(GB_SYSTEM_WINDOWS)
	_mm_pause();
#elif defined(GB_SYSTEM_OSX)
	#if defined(GB_CPU_X86)
		__asm__ volatile ("" : : : "memory");
	#elif defined(GB_CPU_ARM)
		__asm__ volatile ("yield" : : : "memory");
	#endif
#elif defined(GB_CPU_X86)
	_mm_pause();
#elif defined(GB_CPU_ARM)
	__asm__ volatile ("yield" : : : "memory");
#else
	#error Unknown architecture
#endif
}

gb_internal gb_inline void yield_process(void) {
#if defined(GB_SYSTEM_WINDOWS)
	YieldProcessor();
#else
	sched_yield();
#endif
}
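
// NOTE (editorial): yield_thread is a CPU pause hint for use inside spin
// loops, while yield_process is meant to give up the rest of the timeslice;
// sched_yield() does exactly that on POSIX, whereas Windows' YieldProcessor()
// is itself only a pause hint.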

#if defined(GB_SYSTEM_WINDOWS)
gb_internal DWORD __stdcall internal_thread_proc(void *arg) {
	Thread *t = cast(Thread *)arg;
	thread_pool_thread_proc(t);
	return 0;
}
#else
gb_internal void *internal_thread_proc(void *arg) {
#if defined(GB_SYSTEM_LINUX)
	// NOTE: Don't permit any signal delivery to threads on Linux.
	sigset_t mask = {};
	sigfillset(&mask);
	GB_ASSERT_MSG(pthread_sigmask(SIG_BLOCK, &mask, nullptr) == 0, "failed to block signals");
#endif

	Thread *t = cast(Thread *)arg;
	thread_pool_thread_proc(t);
	return NULL;
}
#endif

gb_internal void thread_init(ThreadPool *pool, Thread *t, isize idx) {
	gb_zero_item(t);
#if defined(GB_SYSTEM_WINDOWS)
	t->win32_handle = INVALID_HANDLE_VALUE;
#else
	t->posix_handle = 0;
#endif

	t->capacity = 1 << 14; // must be a power of 2
	t->queue = gb_alloc_array(heap_allocator(), WorkerTask, t->capacity);
	t->head_and_tail = 0;
	t->pool = pool;
	t->idx = idx;
}

gb_internal void thread_init_and_start(ThreadPool *pool, Thread *t, isize idx) {
	thread_init(pool, t, idx);
	isize stack_size = 0;

#if defined(GB_SYSTEM_WINDOWS)
	t->win32_handle = CreateThread(NULL, stack_size, internal_thread_proc, t, 0, NULL);
	GB_ASSERT_MSG(t->win32_handle != NULL, "CreateThread: GetLastError");
#else
	{
		pthread_attr_t attr;
		pthread_attr_init(&attr);
		defer (pthread_attr_destroy(&attr));
		pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
		if (stack_size != 0) {
			pthread_attr_setstacksize(&attr, stack_size);
		}
		pthread_create(&t->posix_handle, &attr, internal_thread_proc, t);
	}
#endif
}

gb_internal void thread_join_and_destroy(Thread *t) {
#if defined(GB_SYSTEM_WINDOWS)
	WaitForSingleObject(t->win32_handle, INFINITE);
	CloseHandle(t->win32_handle);
	t->win32_handle = INVALID_HANDLE_VALUE;
#else
	pthread_join(t->posix_handle, NULL);
	t->posix_handle = 0;
#endif
	gb_free(heap_allocator(), t->queue);
}
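
// NOTE (editorial): illustrative lifecycle; `pool` is assumed to be an
// initialized ThreadPool:
//
//     Thread worker;
//     thread_init_and_start(pool, &worker, 0); // runs thread_pool_thread_proc
//     ...
//     thread_join_and_destroy(&worker);        // blocks, then frees the queue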

gb_internal void thread_set_name(Thread *t, char const *name) {
#if defined(GB_COMPILER_MSVC)
	#pragma pack(push, 8)
		typedef struct {
			DWORD       type;
			char const *name;
			DWORD       id;
			DWORD       flags;
		} gbprivThreadName;
	#pragma pack(pop)

	gbprivThreadName tn;
	tn.type  = 0x1000;
	tn.name  = name;
	tn.id    = GetThreadId(cast(HANDLE)t->win32_handle);
	tn.flags = 0;

	__try {
		RaiseException(0x406d1388, 0, gb_size_of(tn)/4, cast(ULONG_PTR *)&tn);
	} __except(1 /*EXCEPTION_EXECUTE_HANDLER*/) {
	}
#elif defined(GB_SYSTEM_WINDOWS) && !defined(GB_COMPILER_MSVC)
	// IMPORTANT TODO(bill): Set thread name for GCC/Clang on windows
	return;
#elif defined(GB_SYSTEM_OSX)
	// TODO(bill): Test if this works
	pthread_setname_np(name);
#elif defined(GB_SYSTEM_FREEBSD) || defined(GB_SYSTEM_OPENBSD)
	pthread_set_name_np(t->posix_handle, name);
#else
	// TODO(bill): Test if this works
	pthread_setname_np(t->posix_handle, name);
#endif
}

#if defined(GB_SYSTEM_LINUX)

#include <linux/futex.h>
#include <sys/syscall.h>

gb_internal void futex_signal(Futex *addr) {
	// FUTEX_WAKE returns the number of waiters woken; waking zero threads is
	// a normal outcome (nobody was waiting), so only -1 is an error.
	int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL, 0);
	if (ret == -1) {
		perror("Futex wake");
		GB_PANIC("Failed in futex wake!\n");
	}
}

gb_internal void futex_broadcast(Futex *addr) {
	int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL, 0);
	if (ret == -1) {
		perror("Futex wake");
		GB_PANIC("Failed in futex wake!\n");
	}
}

gb_internal void futex_wait(Futex *addr, Footex val) {
	for (;;) {
		int ret = syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL, 0);
		if (ret == -1) {
			if (errno != EAGAIN) {
				perror("Futex wait");
				GB_PANIC("Failed in futex wait!\n");
			} else {
				// EAGAIN: *addr no longer equals val, so the wait is satisfied
				return;
			}
		} else if (ret == 0) {
			if (*addr != val) {
				return;
			}
		}
	}
}
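
// NOTE (editorial): FUTEX_PRIVATE_FLAG restricts the futex to this process,
// which skips the cross-process lookup path in the kernel. futex_wait treats
// EAGAIN ("value changed before the kernel could sleep") as success and
// re-checks the value after a genuine wakeup, since wakes can be spurious
// from the caller's point of view.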

#elif defined(GB_SYSTEM_FREEBSD)

#include <sys/types.h>
#include <sys/umtx.h>

gb_internal void futex_signal(Futex *addr) {
	_umtx_op(addr, UMTX_OP_WAKE, 1, 0, 0);
}

gb_internal void futex_broadcast(Futex *addr) {
	_umtx_op(addr, UMTX_OP_WAKE, INT32_MAX, 0, 0);
}

gb_internal void futex_wait(Futex *addr, Footex val) {
	for (;;) {
		int ret = _umtx_op(addr, UMTX_OP_WAIT_UINT, val, 0, NULL);
		if (ret == -1) {
			// _umtx_op reports failure via -1/errno
			if (errno == ETIMEDOUT || errno == EINTR) {
				continue;
			}
			perror("Futex wait");
			GB_PANIC("Failed in futex wait!\n");
		} else if (ret == 0) {
			if (*addr != val) {
				return;
			}
		}
	}
}

#elif defined(GB_SYSTEM_OPENBSD)

#include <sys/futex.h>

gb_internal void futex_signal(Futex *addr) {
	for (;;) {
		// futex() returns the number of woken waiters; waking zero threads is
		// a normal outcome, so only failures are retried.
		int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL);
		if (ret == -1) {
			if (errno == ETIMEDOUT || errno == EINTR) {
				continue;
			}
			perror("Futex wake");
			GB_PANIC("futex wake fail");
		}
		return;
	}
}

gb_internal void futex_broadcast(Futex *addr) {
	for (;;) {
		int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL);
		if (ret == -1) {
			if (errno == ETIMEDOUT || errno == EINTR) {
				continue;
			}
			perror("Futex wake");
			GB_PANIC("futex wake fail");
		}
		return;
	}
}

gb_internal void futex_wait(Futex *addr, Footex val) {
	for (;;) {
		int ret = futex((volatile uint32_t *)addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL);
		if (ret == -1) {
			if (*addr != val) {
				// the value changed underneath us, so the wait is satisfied
				return;
			}
			if (errno == ETIMEDOUT || errno == EINTR) {
				continue;
			}
			perror("Futex wait");
			GB_PANIC("Failed in futex wait!\n");
		}
	}
}

#elif defined(GB_SYSTEM_OSX)

#define UL_COMPARE_AND_WAIT 0x00000001
#define ULF_WAKE_ALL        0x00000100
#define ULF_NO_ERRNO        0x01000000

extern "C" int __ulock_wait(uint32_t operation, void *addr, uint64_t value, uint32_t timeout); /* timeout is specified in microseconds */
extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value);

gb_internal void futex_signal(Futex *addr) {
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, 0);
		if (ret >= 0) {
			return;
		}
		ret = -ret; // with ULF_NO_ERRNO, failures come back as negated errno values
		if (ret == EINTR || ret == EFAULT) {
			continue;
		}
		if (ret == ENOENT) { // nobody is waiting on this address
			return;
		}
		GB_PANIC("Failed in futex wake!\n");
	}
}

gb_internal void futex_broadcast(Futex *addr) {
	for (;;) {
		// ULF_WAKE_ALL wakes every waiter, which is what broadcast requires.
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, addr, 0);
		if (ret >= 0) {
			return;
		}
		ret = -ret;
		if (ret == EINTR || ret == EFAULT) {
			continue;
		}
		if (ret == ENOENT) {
			return;
		}
		GB_PANIC("Failed in futex wake!\n");
	}
}

gb_internal void futex_wait(Futex *addr, Footex val) {
	for (;;) {
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, val, 0);
		if (ret >= 0) {
			if (*addr != val) {
				return;
			}
			continue;
		}
		ret = -ret;
		if (ret == EINTR || ret == EFAULT) {
			continue;
		}
		if (ret == ENOENT) {
			return;
		}
		GB_PANIC("Failed in futex wait!\n");
	}
}
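
// NOTE (editorial): __ulock_wait/__ulock_wake are private Apple syscall
// wrappers (the same primitives that back os_unfair_lock); they are not
// covered by any API stability guarantee.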

#elif defined(GB_SYSTEM_WINDOWS)

gb_internal void futex_signal(Futex *addr) {
	WakeByAddressSingle((void *)addr);
}

gb_internal void futex_broadcast(Futex *addr) {
	WakeByAddressAll((void *)addr);
}

gb_internal void futex_wait(Futex *addr, Footex val) {
	for (;;) {
		WaitOnAddress(addr, (void *)&val, sizeof(val), INFINITE);
		if (*addr != val) break;
	}
}
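
// NOTE (editorial): WaitOnAddress may return spuriously (and whenever the
// value changes), hence the re-check loop above; these calls require
// Windows 8+ and linking against synchronization.lib.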

#endif

#if defined(GB_SYSTEM_WINDOWS)
#pragma warning(pop)
#endif