
Correct mutex usage for `path_to_fullpath`; make `ThreadPool` use `BlockingMutex`

gingerBill · 4 years ago
parent commit 3fde4616e0
3 changed files with 16 additions and 15 deletions
  1. src/build_settings.cpp  +5 -5
  2. src/main.cpp  +1 -0
  3. src/thread_pool.cpp  +10 -10
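
For context, the change replaces the `gbMutex`/`gb_mutex_*` primitives with the compiler's own `BlockingMutex` wrapper and gives `path_to_fullpath` a dedicated `fullpath_mutex` instead of reusing `string_buffer_mutex`. A minimal sketch of the interface this diff assumes is shown below; the real `BlockingMutex` is defined elsewhere in the Odin sources, so the `std::mutex`-backed body here is only an illustrative guess based on the call sites (`mutex_init`, `mutex_lock`, `mutex_unlock`, `mutex_destroy`).

```cpp
// Illustrative sketch only; not the actual Odin compiler definition.
// The function names mirror how this commit uses BlockingMutex; the
// std::mutex backing is an assumption for the sake of a runnable example.
#include <mutex>

struct BlockingMutex {
	std::mutex handle;
};

void mutex_init(BlockingMutex *m)    { (void)m; } // std::mutex needs no explicit init
void mutex_lock(BlockingMutex *m)    { m->handle.lock(); }
void mutex_unlock(BlockingMutex *m)  { m->handle.unlock(); }
void mutex_destroy(BlockingMutex *m) { (void)m; } // nothing to tear down here
```

With such a wrapper in place, `fullpath_mutex` only needs the `mutex_init` call added in `main.cpp`, and the `gb_mutex_*` calls in `thread_pool.cpp` map one-to-one onto `mutex_lock`/`mutex_unlock`/`mutex_destroy`, as the hunks below show.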

+ 5 - 5
src/build_settings.cpp

@@ -470,7 +470,6 @@ bool allow_check_foreign_filepath(void) {
 String const WIN32_SEPARATOR_STRING = {cast(u8 *)"\\", 1};
 String const NIX_SEPARATOR_STRING   = {cast(u8 *)"/",  1};
 
-
 String internal_odin_root_dir(void);
 String odin_root_dir(void) {
 	if (global_module_path_set) {
@@ -684,12 +683,13 @@ String internal_odin_root_dir(void) {
 }
 #endif
 
+gb_global BlockingMutex fullpath_mutex;
 
 #if defined(GB_SYSTEM_WINDOWS)
 String path_to_fullpath(gbAllocator a, String s) {
 	String result = {};
-	mutex_lock(&string_buffer_mutex);
-	defer (mutex_unlock(&string_buffer_mutex));
+	mutex_lock(&fullpath_mutex);
+	defer (mutex_unlock(&fullpath_mutex));
 
 	gbTempArenaMemory tmp = gb_temp_arena_memory_begin(&string_buffer_arena);
 	defer (gb_temp_arena_memory_end(tmp));
@@ -716,9 +716,9 @@ String path_to_fullpath(gbAllocator a, String s) {
 #elif defined(GB_SYSTEM_OSX) || defined(GB_SYSTEM_UNIX)
 String path_to_fullpath(gbAllocator a, String s) {
 	char *p;
-	mutex_lock(&string_buffer_mutex);
+	mutex_lock(&fullpath_mutex);
 	p = realpath(cast(char *)s.text, 0);
-	mutex_unlock(&string_buffer_mutex);
+	mutex_unlock(&fullpath_mutex);
 	if(p == nullptr) return String{};
 	return make_string_c(p);
 }

+ 1 - 0
src/main.cpp

@@ -2009,6 +2009,7 @@ int main(int arg_count, char const **arg_ptr) {
 	arena_init(&permanent_arena, heap_allocator());
 	temp_allocator_init(&temporary_allocator_data, 16*1024*1024);
 	arena_init(&global_ast_arena, heap_allocator());
+	mutex_init(&fullpath_mutex);
 
 	init_string_buffer_memory();
 	init_string_interner();

+ 10 - 10
src/thread_pool.cpp

@@ -11,10 +11,10 @@ struct WorkerTask {
 
 
 struct ThreadPool {
-	gbMutex     mutex;
-	gbSemaphore sem_available;
-	gbAtomic32  processing_work_count;
-	bool        is_running;
+	BlockingMutex mutex;
+	gbSemaphore   sem_available;
+	gbAtomic32    processing_work_count;
+	bool          is_running;
 
 	gbAllocator allocator;
 
@@ -39,7 +39,7 @@ void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count
 	mpmc_init(&pool->tasks, a, 1024);
 	pool->thread_count = gb_max(thread_count, 0);
 	pool->threads = gb_alloc_array(a, gbThread, pool->thread_count);
-	gb_mutex_init(&pool->mutex);
+	mutex_init(&pool->mutex);
 	gb_semaphore_init(&pool->sem_available);
 	pool->is_running = true;
 
@@ -91,7 +91,7 @@ void thread_pool_destroy(ThreadPool *pool) {
 	thread_pool_join(pool);
 
 	gb_semaphore_destroy(&pool->sem_available);
-	gb_mutex_destroy(&pool->mutex);
+	mutex_destroy(&pool->mutex);
 	gb_free(pool->allocator, pool->threads);
 	pool->thread_count = 0;
 	mpmc_destroy(&pool->tasks);
@@ -99,7 +99,7 @@ void thread_pool_destroy(ThreadPool *pool) {
 
 
 void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data) {
-	gb_mutex_lock(&pool->mutex);
+	mutex_lock(&pool->mutex);
 
 	WorkerTask task = {};
 	task.do_work = proc;
@@ -107,7 +107,7 @@ void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data) {
 
 	mpmc_enqueue(&pool->tasks, task);
 	gb_semaphore_post(&pool->sem_available, 1);
-	gb_mutex_unlock(&pool->mutex);
+	mutex_unlock(&pool->mutex);
 }
 
 bool thread_pool_try_and_pop_task(ThreadPool *pool, WorkerTask *task) {
@@ -139,9 +139,9 @@ void thread_pool_wait_to_process(ThreadPool *pool) {
 
 		// Safety-kick
 		while (pool->tasks.count.load(std::memory_order_relaxed) > 0 && gb_atomic32_load(&pool->processing_work_count) == 0) {
-			gb_mutex_lock(&pool->mutex);
+			mutex_lock(&pool->mutex);
 			gb_semaphore_post(&pool->sem_available, cast(i32)pool->tasks.count.load(std::memory_order_relaxed));
-			gb_mutex_unlock(&pool->mutex);
+			mutex_unlock(&pool->mutex);
 		}
 
 		gb_yield();