// threading.cpp

#if defined(GB_SYSTEM_LINUX)
#include <signal.h>
#endif

#if defined(GB_SYSTEM_WINDOWS)
#pragma warning(push)
#pragma warning(disable: 4505)
#endif

struct BlockingMutex;
struct RecursiveMutex;
struct Semaphore;
struct Condition;
struct Thread;
struct ThreadPool;

#define THREAD_PROC(name) isize name(struct Thread *thread)
gb_internal THREAD_PROC(thread_pool_thread_proc);

#define WORKER_TASK_PROC(name) isize name(void *data)
typedef WORKER_TASK_PROC(WorkerTaskProc);

typedef struct WorkerTask {
	WorkerTaskProc *do_work;
	void           *data;
} WorkerTask;
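
// NOTE: Each worker thread owns a fixed-capacity ring buffer of WorkerTasks.
// Judging by its type, `head_and_tail` packs the queue's head and tail indices
// into a single 64-bit atomic so both can be read and updated together; the
// queue operations themselves live with the thread-pool code, so this is
// inferred from the field rather than shown here.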
struct Thread {
#if defined(GB_SYSTEM_WINDOWS)
	void *win32_handle;
#else
	pthread_t posix_handle;
#endif

	isize idx;

	WorkerTask *queue;
	size_t capacity;
	std::atomic<uint64_t> head_and_tail;

	isize stack_size;

	struct ThreadPool *pool;
};
gb_internal void mutex_init    (BlockingMutex *m);
gb_internal void mutex_destroy (BlockingMutex *m);
gb_internal void mutex_lock    (BlockingMutex *m);
gb_internal bool mutex_try_lock(BlockingMutex *m);
gb_internal void mutex_unlock  (BlockingMutex *m);

gb_internal void mutex_init    (RecursiveMutex *m);
gb_internal void mutex_destroy (RecursiveMutex *m);
gb_internal void mutex_lock    (RecursiveMutex *m);
gb_internal bool mutex_try_lock(RecursiveMutex *m);
gb_internal void mutex_unlock  (RecursiveMutex *m);

gb_internal void semaphore_init   (Semaphore *s);
gb_internal void semaphore_destroy(Semaphore *s);
gb_internal void semaphore_post   (Semaphore *s, i32 count);
gb_internal void semaphore_wait   (Semaphore *s);
gb_internal void semaphore_release(Semaphore *s) { semaphore_post(s, 1); }

gb_internal void condition_init(Condition *c);
gb_internal void condition_destroy(Condition *c);
gb_internal void condition_broadcast(Condition *c);
gb_internal void condition_signal(Condition *c);
gb_internal void condition_wait(Condition *c, BlockingMutex *m);
gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms);

gb_internal u32 thread_current_id(void);

gb_internal void thread_init            (ThreadPool *pool, Thread *t, isize idx);
gb_internal void thread_init_and_start  (ThreadPool *pool, Thread *t, isize idx);
gb_internal void thread_join_and_destroy(Thread *t);
gb_internal void thread_set_name        (Thread *t, char const *name);

gb_internal void yield_thread(void);
gb_internal void yield_process(void);
struct MutexGuard {
	MutexGuard()                   = delete;
	MutexGuard(MutexGuard const &) = delete;

	MutexGuard(BlockingMutex *bm) : bm{bm} {
		mutex_lock(this->bm);
	}
	MutexGuard(RecursiveMutex *rm) : rm{rm} {
		mutex_lock(this->rm);
	}
	MutexGuard(BlockingMutex &bm) : bm{&bm} {
		mutex_lock(this->bm);
	}
	MutexGuard(RecursiveMutex &rm) : rm{&rm} {
		mutex_lock(this->rm);
	}
	~MutexGuard() {
		if (this->bm) {
			mutex_unlock(this->bm);
		} else if (this->rm) {
			mutex_unlock(this->rm);
		}
	}

	operator bool() const { return true; }

	// Both pointers must default to null: each constructor sets only one of
	// them, and the destructor reads both, so leaving the other uninitialized
	// would unlock a garbage pointer.
	BlockingMutex  *bm = nullptr;
	RecursiveMutex *rm = nullptr;
};

#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_){m})
#define MUTEX_GUARD(m)           MutexGuard GB_DEFER_3(_mutex_guard_){m}
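
// Illustrative usage of the macros above (hypothetical mutex name):
//
//     BlockingMutex lock;
//     MUTEX_GUARD_BLOCK(&lock) {
//         // critical section; `lock` is released when the block exits
//     }
//
// MUTEX_GUARD(m) instead holds the lock for the rest of the enclosing scope.
// The `operator bool` returning true is what lets a guard sit in an `if`.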
#if defined(GB_SYSTEM_WINDOWS)
struct BlockingMutex {
	SRWLOCK srwlock;
};
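
// An SRWLOCK is ready to use when zero-initialized and owns no kernel object,
// so init and destroy are deliberately empty.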
gb_internal void mutex_init(BlockingMutex *m) {
}
gb_internal void mutex_destroy(BlockingMutex *m) {
}
gb_internal void mutex_lock(BlockingMutex *m) {
	AcquireSRWLockExclusive(&m->srwlock);
}
gb_internal bool mutex_try_lock(BlockingMutex *m) {
	return !!TryAcquireSRWLockExclusive(&m->srwlock);
}
gb_internal void mutex_unlock(BlockingMutex *m) {
	ReleaseSRWLockExclusive(&m->srwlock);
}
struct RecursiveMutex {
	CRITICAL_SECTION win32_critical_section;
};

gb_internal void mutex_init(RecursiveMutex *m) {
	InitializeCriticalSection(&m->win32_critical_section);
}
gb_internal void mutex_destroy(RecursiveMutex *m) {
	DeleteCriticalSection(&m->win32_critical_section);
}
gb_internal void mutex_lock(RecursiveMutex *m) {
	EnterCriticalSection(&m->win32_critical_section);
}
gb_internal bool mutex_try_lock(RecursiveMutex *m) {
	return TryEnterCriticalSection(&m->win32_critical_section) != 0;
}
gb_internal void mutex_unlock(RecursiveMutex *m) {
	LeaveCriticalSection(&m->win32_critical_section);
}
struct Semaphore {
	void *win32_handle;
};

gb_internal void semaphore_init(Semaphore *s) {
	s->win32_handle = CreateSemaphoreA(NULL, 0, I32_MAX, NULL);
}
gb_internal void semaphore_destroy(Semaphore *s) {
	CloseHandle(s->win32_handle);
}
gb_internal void semaphore_post(Semaphore *s, i32 count) {
	ReleaseSemaphore(s->win32_handle, count, NULL);
}
gb_internal void semaphore_wait(Semaphore *s) {
	WaitForSingleObjectEx(s->win32_handle, INFINITE, FALSE);
}
struct Condition {
	CONDITION_VARIABLE cond;
};
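
// Like SRWLOCK, a zero-initialized CONDITION_VARIABLE is already valid, so
// there is nothing for init or destroy to do.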
gb_internal void condition_init(Condition *c) {
}
gb_internal void condition_destroy(Condition *c) {
}
gb_internal void condition_broadcast(Condition *c) {
	WakeAllConditionVariable(&c->cond);
}
gb_internal void condition_signal(Condition *c) {
	WakeConditionVariable(&c->cond);
}
gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
	SleepConditionVariableSRW(&c->cond, &m->srwlock, INFINITE, 0);
}
gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms) {
	SleepConditionVariableSRW(&c->cond, &m->srwlock, timeout_in_ms, 0);
}
#else
struct BlockingMutex {
	pthread_mutex_t pthread_mutex;
};

gb_internal void mutex_init(BlockingMutex *m) {
	pthread_mutex_init(&m->pthread_mutex, nullptr);
}
gb_internal void mutex_destroy(BlockingMutex *m) {
	pthread_mutex_destroy(&m->pthread_mutex);
}
gb_internal void mutex_lock(BlockingMutex *m) {
	pthread_mutex_lock(&m->pthread_mutex);
}
gb_internal bool mutex_try_lock(BlockingMutex *m) {
	return pthread_mutex_trylock(&m->pthread_mutex) == 0;
}
gb_internal void mutex_unlock(BlockingMutex *m) {
	pthread_mutex_unlock(&m->pthread_mutex);
}
struct RecursiveMutex {
	pthread_mutex_t pthread_mutex;
	pthread_mutexattr_t pthread_mutexattr;
};

gb_internal void mutex_init(RecursiveMutex *m) {
	pthread_mutexattr_init(&m->pthread_mutexattr);
	pthread_mutexattr_settype(&m->pthread_mutexattr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m->pthread_mutex, &m->pthread_mutexattr);
}
gb_internal void mutex_destroy(RecursiveMutex *m) {
	pthread_mutex_destroy(&m->pthread_mutex);
	pthread_mutexattr_destroy(&m->pthread_mutexattr); // release the attr initialized in mutex_init
}
gb_internal void mutex_lock(RecursiveMutex *m) {
	pthread_mutex_lock(&m->pthread_mutex);
}
gb_internal bool mutex_try_lock(RecursiveMutex *m) {
	return pthread_mutex_trylock(&m->pthread_mutex) == 0;
}
gb_internal void mutex_unlock(RecursiveMutex *m) {
	pthread_mutex_unlock(&m->pthread_mutex);
}
#if defined(GB_SYSTEM_OSX)
struct Semaphore {
	semaphore_t osx_handle;
};

gb_internal void semaphore_init   (Semaphore *s)            { semaphore_create(mach_task_self(), &s->osx_handle, SYNC_POLICY_FIFO, 0); }
gb_internal void semaphore_destroy(Semaphore *s)            { semaphore_destroy(mach_task_self(), s->osx_handle); }
gb_internal void semaphore_post   (Semaphore *s, i32 count) { while (count --> 0) semaphore_signal(s->osx_handle); }
gb_internal void semaphore_wait   (Semaphore *s)            { semaphore_wait(s->osx_handle); }
#elif defined(GB_SYSTEM_UNIX)
struct Semaphore {
	sem_t unix_handle;
};

gb_internal void semaphore_init   (Semaphore *s)            { sem_init(&s->unix_handle, 0, 0); }
gb_internal void semaphore_destroy(Semaphore *s)            { sem_destroy(&s->unix_handle); }
gb_internal void semaphore_post   (Semaphore *s, i32 count) { while (count --> 0) sem_post(&s->unix_handle); }
gb_internal void semaphore_wait   (Semaphore *s)            { int i; do { i = sem_wait(&s->unix_handle); } while (i == -1 && errno == EINTR); } // retry when interrupted by a signal
#else
#error Implement Semaphore for this platform
#endif
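
// All of these semaphores start with a count of zero: a semaphore_wait blocks
// until some thread has made a matching semaphore_post.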
struct Condition {
	pthread_cond_t pthread_cond;
};

gb_internal void condition_init(Condition *c) {
	pthread_cond_init(&c->pthread_cond, NULL);
}
gb_internal void condition_destroy(Condition *c) {
	pthread_cond_destroy(&c->pthread_cond);
}
gb_internal void condition_broadcast(Condition *c) {
	pthread_cond_broadcast(&c->pthread_cond);
}
gb_internal void condition_signal(Condition *c) {
	pthread_cond_signal(&c->pthread_cond);
}
gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
	pthread_cond_wait(&c->pthread_cond, &m->pthread_mutex);
}
gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms) {
	// pthread_cond_timedwait takes an *absolute* deadline, not a relative
	// timeout, so add the timeout to the current wall-clock time
	struct timespec abstime = {};
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec  += timeout_in_ms/1000;
	abstime.tv_nsec += cast(long)(timeout_in_ms%1000)*1000000;
	if (abstime.tv_nsec >= 1000000000) {
		abstime.tv_sec  += 1;
		abstime.tv_nsec -= 1000000000;
	}
	pthread_cond_timedwait(&c->pthread_cond, &m->pthread_mutex, &abstime);
}
#endif
gb_internal u32 thread_current_id(void) {
	u32 thread_id;
#if defined(GB_SYSTEM_WINDOWS)
#if defined(GB_ARCH_32_BIT) && defined(GB_CPU_X86)
	// Read the thread id straight out of the TEB (FS points at it on x86)
	thread_id = (cast(u32 *)__readfsdword(24))[9];
#elif defined(GB_ARCH_64_BIT) && defined(GB_CPU_X86)
	// Same trick on x64, where GS points at the TEB
	thread_id = (cast(u32 *)__readgsqword(48))[18];
#else
	thread_id = GetCurrentThreadId();
#endif
#elif defined(GB_SYSTEM_OSX) && defined(GB_ARCH_64_BIT)
	thread_id = pthread_mach_thread_np(pthread_self());
#elif defined(GB_ARCH_32_BIT) && defined(GB_CPU_X86)
	__asm__("mov %%gs:0x08,%0" : "=r"(thread_id));
#elif defined(GB_ARCH_64_BIT) && defined(GB_CPU_X86)
	__asm__("mov %%fs:0x10,%0" : "=r"(thread_id));
#elif defined(GB_SYSTEM_LINUX)
	thread_id = gettid();
#else
	#error Unsupported architecture for thread_current_id()
#endif
	return thread_id;
}
gb_internal gb_inline void yield_thread(void) {
#if defined(GB_SYSTEM_WINDOWS)
	_mm_pause();
#elif defined(GB_SYSTEM_OSX)
#if defined(GB_CPU_X86)
	__asm__ volatile ("" : : : "memory"); // compiler barrier only
#elif defined(GB_CPU_ARM)
	__asm__ volatile ("yield" : : : "memory");
#endif
#elif defined(GB_CPU_X86)
	_mm_pause();
#elif defined(GB_CPU_ARM)
	__asm__ volatile ("yield" : : : "memory");
#else
	#error Unknown architecture
#endif
}
// NOTE: renamed from `yield` to match the `yield_process` forward declaration above
gb_internal gb_inline void yield_process(void) {
#if defined(GB_SYSTEM_WINDOWS)
	YieldProcessor();
#else
	sched_yield();
#endif
}
#if defined(GB_SYSTEM_WINDOWS)
gb_internal DWORD __stdcall internal_thread_proc(void *arg) {
	Thread *t = cast(Thread *)arg;
	thread_pool_thread_proc(t);
	return 0;
}
#else
gb_internal void *internal_thread_proc(void *arg) {
#if defined(GB_SYSTEM_LINUX)
	// NOTE: Don't permit any signal delivery to threads on Linux.
	sigset_t mask = {};
	sigfillset(&mask);
	GB_ASSERT_MSG(pthread_sigmask(SIG_BLOCK, &mask, nullptr) == 0, "failed to block signals");
#endif
	Thread *t = cast(Thread *)arg;
	thread_pool_thread_proc(t);
	return NULL;
}
#endif
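
// Both wrappers above only adapt the platform's thread entry signature to
// thread_pool_thread_proc, which holds the actual worker loop.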
gb_internal void thread_init(ThreadPool *pool, Thread *t, isize idx) {
	gb_zero_item(t);
#if defined(GB_SYSTEM_WINDOWS)
	t->win32_handle = INVALID_HANDLE_VALUE;
#else
	t->posix_handle = 0;
#endif
	t->capacity = 1 << 14; // must be a power of 2
	t->queue = (WorkerTask *)calloc(t->capacity, sizeof(WorkerTask)); // calloc takes (nmemb, size)
	t->head_and_tail = 0;
	t->pool = pool;
	t->idx = idx;
}
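
// NOTE: A power-of-two capacity suggests queue indices wrap with a cheap
// `& (capacity - 1)` mask rather than a modulo; the queue operations live in
// the thread-pool code, so this is inferred from the requirement above.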
gb_internal void thread_init_and_start(ThreadPool *pool, Thread *t, isize idx) {
	thread_init(pool, t, idx);
	isize stack_size = 0;

#if defined(GB_SYSTEM_WINDOWS)
	t->win32_handle = CreateThread(NULL, stack_size, internal_thread_proc, t, 0, NULL);
	GB_ASSERT_MSG(t->win32_handle != NULL, "CreateThread: GetLastError");
#else
	{
		pthread_attr_t attr;
		pthread_attr_init(&attr);
		defer (pthread_attr_destroy(&attr));
		pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
		if (stack_size != 0) {
			pthread_attr_setstacksize(&attr, stack_size);
		}
		pthread_create(&t->posix_handle, &attr, internal_thread_proc, t);
	}
#endif
}
gb_internal void thread_join_and_destroy(Thread *t) {
#if defined(GB_SYSTEM_WINDOWS)
	WaitForSingleObject(t->win32_handle, INFINITE);
	CloseHandle(t->win32_handle);
	t->win32_handle = INVALID_HANDLE_VALUE;
#else
	pthread_join(t->posix_handle, NULL);
	t->posix_handle = 0;
#endif
}
gb_internal void thread_set_name(Thread *t, char const *name) {
#if defined(GB_COMPILER_MSVC)
#pragma pack(push, 8)
	typedef struct {
		DWORD       type;
		char const *name;
		DWORD       id;
		DWORD       flags;
	} gbprivThreadName;
#pragma pack(pop)

	// Raising exception 0x406D1388 with this payload is the classic way to
	// hand a thread name to an attached Visual Studio debugger; the empty
	// __except swallows the exception when no debugger is listening.
	gbprivThreadName tn;
	tn.type  = 0x1000;
	tn.name  = name;
	tn.id    = GetThreadId(cast(HANDLE)t->win32_handle);
	tn.flags = 0;

	__try {
		RaiseException(0x406d1388, 0, gb_size_of(tn)/4, cast(ULONG_PTR *)&tn);
	} __except(1 /*EXCEPTION_EXECUTE_HANDLER*/) {
	}
#elif defined(GB_SYSTEM_WINDOWS) && !defined(GB_COMPILER_MSVC)
	// IMPORTANT TODO(bill): Set thread name for GCC/Clang on windows
	return;
#elif defined(GB_SYSTEM_OSX)
	// TODO(bill): Test if this works
	pthread_setname_np(name);
#elif defined(GB_SYSTEM_FREEBSD) || defined(GB_SYSTEM_OPENBSD)
	pthread_set_name_np(t->posix_handle, name);
#else
	// TODO(bill): Test if this works
	pthread_setname_np(t->posix_handle, name);
#endif
}
#if defined(GB_SYSTEM_LINUX)
#include <linux/futex.h>
#include <sys/syscall.h>

typedef std::atomic<int32_t> Futex;
typedef volatile int32_t     Footex;
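
// `Futex` is the shared cell threads sleep on; `Footex` is the plain value
// type used for the expected-value snapshot passed to the wait call.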
gb_internal void tpool_wake_addr(Futex *addr) {
	for (;;) {
		int ret = syscall(SYS_futex, addr, FUTEX_WAKE, 1, NULL, NULL, 0);
		if (ret == -1) {
			perror("Futex wake");
			GB_PANIC("Failed in futex wake!\n");
		} else if (ret > 0) {
			return;
		}
	}
}
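
// Note that the wake loops until it actually wakes a waiter (FUTEX_WAKE
// returns the number of threads woken), so callers are expected to only wake
// an address that some thread is waiting on, or about to wait on.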
gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
	for (;;) {
		int ret = syscall(SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0);
		if (ret == -1) {
			if (errno != EAGAIN) {
				perror("Futex wait");
				GB_PANIC("Failed in futex wait!\n");
			} else {
				return; // *addr no longer held `val`; nothing to wait for
			}
		} else if (ret == 0) {
			// A return of 0 can be a spurious wake, so recheck the value
			if (*addr != val) {
				return;
			}
		}
	}
}
#elif defined(GB_SYSTEM_FREEBSD)
#include <sys/types.h>
#include <sys/umtx.h>

typedef std::atomic<int32_t> Futex;
typedef volatile int32_t     Footex;

gb_internal void tpool_wake_addr(Futex *addr) {
	_umtx_op(addr, UMTX_OP_WAKE, 1, 0, 0);
}

gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
	for (;;) {
		// _umtx_op returns -1 on failure and 0 on success
		int ret = _umtx_op(addr, UMTX_OP_WAIT_UINT, val, 0, NULL);
		if (ret == -1) {
			if (errno == ETIMEDOUT || errno == EINTR) {
				continue;
			}
			perror("Futex wait");
			GB_PANIC("Failed in futex wait!\n");
		} else if (ret == 0) {
			if (*addr != val) {
				return;
			}
		}
	}
}
#elif defined(GB_SYSTEM_OPENBSD)
#include <sys/futex.h>

typedef std::atomic<int32_t> Futex;
typedef volatile int32_t     Footex;

gb_internal void tpool_wake_addr(Futex *addr) {
	for (;;) {
		int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL);
		if (ret == -1) {
			if (errno == ETIMEDOUT || errno == EINTR) {
				continue;
			}
			perror("Futex wake");
			GB_PANIC("futex wake fail");
		} else if (ret == 1) {
			return;
		}
	}
}

gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
	for (;;) {
		int ret = futex((volatile uint32_t *)addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL);
		if (ret == -1) {
			if (*addr != val) {
				return;
			}
			if (errno == ETIMEDOUT || errno == EINTR) {
				continue;
			}
			perror("Futex wait");
			GB_PANIC("Failed in futex wait!\n");
		}
	}
}
#elif defined(GB_SYSTEM_OSX)
typedef std::atomic<int64_t> Futex;
typedef volatile int64_t     Footex;

#define UL_COMPARE_AND_WAIT 0x00000001
#define ULF_NO_ERRNO        0x01000000

extern "C" int __ulock_wait(uint32_t operation, void *addr, uint64_t value, uint32_t timeout); /* timeout is specified in microseconds */
extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value);
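
// __ulock_wait/__ulock_wake are private Darwin syscalls (the kernel primitive
// underneath libdispatch and os_unfair_lock). With ULF_NO_ERRNO they report
// errors as a negative return value instead of setting errno, hence the
// `ret = -ret` handling below.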
gb_internal void tpool_wake_addr(Futex *addr) {
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, 0);
		if (ret >= 0) {
			return;
		}
		ret = -ret;
		if (ret == EINTR || ret == EFAULT) {
			continue;
		}
		if (ret == ENOENT) {
			return;
		}
		GB_PANIC("Failed in futex wake!\n");
	}
}

gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
	for (;;) {
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, val, 0);
		if (ret >= 0) {
			if (*addr != val) {
				return;
			}
			continue;
		}
		ret = -ret;
		if (ret == EINTR || ret == EFAULT) {
			continue;
		}
		if (ret == ENOENT) {
			return;
		}
		GB_PANIC("Failed in futex wait!\n");
	}
}
#elif defined(GB_SYSTEM_WINDOWS)
typedef std::atomic<int64_t> Futex;
typedef volatile int64_t     Footex;

gb_internal void tpool_wake_addr(Futex *addr) {
	WakeByAddressSingle((void *)addr);
}

gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
	for (;;) {
		WaitOnAddress(addr, (void *)&val, sizeof(val), INFINITE);
		if (*addr != val) break;
	}
}
#endif
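
// Illustrative waiter-side protocol for the wait/wake pair above
// (hypothetical `flag` cell, not part of this file):
//
//     Futex flag = {0};
//
//     // waiter:
//     Footex observed = flag.load();
//     while (observed == 0) {
//         tpool_wait_on_addr(&flag, observed); // sleeps only while flag == observed
//         observed = flag.load();
//     }
//
//     // notifier (only once a waiter is known to be present, given the
//     // retry-until-woken behaviour of some wake implementations above):
//     flag.store(1);
//     tpool_wake_addr(&flag);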
#if defined(GB_SYSTEM_WINDOWS)
#pragma warning(pop)
#endif