Преглед на файлове

Merge pull request #93739 from AThousandShips/lock_unlock

[Core] Allow locking/unlocking of `MutexLock`
Rémi Verschelde преди 1 година
родител
ревизия
ce44c91223
променени са 4 файла, в които са добавени 59 реда и са изтрити 26 реда
  1. core/io/ip.cpp — 17 добавени, 16 изтрити
  2. core/io/resource_loader.cpp — 7 добавени, 8 изтрити
  3. core/os/mutex.h — 15 добавени, 2 изтрити
  4. core/os/safe_binary_mutex.h — 20 добавени, 0 изтрити

+ 17 - 16
core/io/ip.cpp

@@ -81,17 +81,17 @@ struct _IP_ResolverPrivate {
 				continue;
 				continue;
 			}
 			}
 
 
-			mutex.lock();
+			MutexLock lock(mutex);
 			List<IPAddress> response;
 			List<IPAddress> response;
 			String hostname = queue[i].hostname;
 			String hostname = queue[i].hostname;
 			IP::Type type = queue[i].type;
 			IP::Type type = queue[i].type;
-			mutex.unlock();
+			lock.temp_unlock();
 
 
 			// We should not lock while resolving the hostname,
 			// We should not lock while resolving the hostname,
 			// only when modifying the queue.
 			// only when modifying the queue.
 			IP::get_singleton()->_resolve_hostname(response, hostname, type);
 			IP::get_singleton()->_resolve_hostname(response, hostname, type);
 
 
-			MutexLock lock(mutex);
+			lock.temp_relock();
 			// Could have been completed by another function, or deleted.
 			// Could have been completed by another function, or deleted.
 			if (queue[i].status.get() != IP::RESOLVER_STATUS_WAITING) {
 			if (queue[i].status.get() != IP::RESOLVER_STATUS_WAITING) {
 				continue;
 				continue;
@@ -131,21 +131,22 @@ PackedStringArray IP::resolve_hostname_addresses(const String &p_hostname, Type
 	List<IPAddress> res;
 	List<IPAddress> res;
 	String key = _IP_ResolverPrivate::get_cache_key(p_hostname, p_type);
 	String key = _IP_ResolverPrivate::get_cache_key(p_hostname, p_type);
 
 
-	resolver->mutex.lock();
-	if (resolver->cache.has(key)) {
-		res = resolver->cache[key];
-	} else {
-		// This should be run unlocked so the resolver thread can keep resolving
-		// other requests.
-		resolver->mutex.unlock();
-		_resolve_hostname(res, p_hostname, p_type);
-		resolver->mutex.lock();
-		// We might be overriding another result, but we don't care as long as the result is valid.
-		if (res.size()) {
-			resolver->cache[key] = res;
+	{
+		MutexLock lock(resolver->mutex);
+		if (resolver->cache.has(key)) {
+			res = resolver->cache[key];
+		} else {
+			// This should be run unlocked so the resolver thread can keep resolving
+			// other requests.
+			lock.temp_unlock();
+			_resolve_hostname(res, p_hostname, p_type);
+			lock.temp_relock();
+			// We might be overriding another result, but we don't care as long as the result is valid.
+			if (res.size()) {
+				resolver->cache[key] = res;
+			}
 		}
 		}
 	}
 	}
-	resolver->mutex.unlock();
 
 
 	PackedStringArray result;
 	PackedStringArray result;
 	for (const IPAddress &E : res) {
 	for (const IPAddress &E : res) {

+ 7 - 8
core/io/resource_loader.cpp

@@ -690,10 +690,10 @@ Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_e
 		if (Thread::is_main_thread() && !load_token->local_path.is_empty()) {
 		if (Thread::is_main_thread() && !load_token->local_path.is_empty()) {
 			const ThreadLoadTask &load_task = thread_load_tasks[load_token->local_path];
 			const ThreadLoadTask &load_task = thread_load_tasks[load_token->local_path];
 			while (load_task.status == THREAD_LOAD_IN_PROGRESS) {
 			while (load_task.status == THREAD_LOAD_IN_PROGRESS) {
-				thread_load_lock.~MutexLock();
+				thread_load_lock.temp_unlock();
 				bool exit = !_ensure_load_progress();
 				bool exit = !_ensure_load_progress();
 				OS::get_singleton()->delay_usec(1000);
 				OS::get_singleton()->delay_usec(1000);
-				new (&thread_load_lock) MutexLock(thread_load_mutex);
+				thread_load_lock.temp_relock();
 				if (exit) {
 				if (exit) {
 					break;
 					break;
 				}
 				}
@@ -754,7 +754,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
 			bool loader_is_wtp = load_task.task_id != 0;
 			bool loader_is_wtp = load_task.task_id != 0;
 			if (loader_is_wtp) {
 			if (loader_is_wtp) {
 				// Loading thread is in the worker pool.
 				// Loading thread is in the worker pool.
-				thread_load_mutex.unlock();
+				p_thread_load_lock.temp_unlock();
 
 
 				PREPARE_FOR_WTP_WAIT
 				PREPARE_FOR_WTP_WAIT
 				Error wait_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
 				Error wait_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
@@ -770,7 +770,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
 					_run_load_task(&load_task);
 					_run_load_task(&load_task);
 				}
 				}
 
 
-				thread_load_mutex.lock();
+				p_thread_load_lock.temp_relock();
 				load_task.awaited = true;
 				load_task.awaited = true;
 
 
 				DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
 				DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
@@ -1208,7 +1208,7 @@ void ResourceLoader::clear_translation_remaps() {
 void ResourceLoader::clear_thread_load_tasks() {
 void ResourceLoader::clear_thread_load_tasks() {
 	// Bring the thing down as quickly as possible without causing deadlocks or leaks.
 	// Bring the thing down as quickly as possible without causing deadlocks or leaks.
 
 
-	thread_load_mutex.lock();
+	MutexLock thread_load_lock(thread_load_mutex);
 	cleaning_tasks = true;
 	cleaning_tasks = true;
 
 
 	while (true) {
 	while (true) {
@@ -1227,9 +1227,9 @@ void ResourceLoader::clear_thread_load_tasks() {
 		if (none_running) {
 		if (none_running) {
 			break;
 			break;
 		}
 		}
-		thread_load_mutex.unlock();
+		thread_load_lock.temp_unlock();
 		OS::get_singleton()->delay_usec(1000);
 		OS::get_singleton()->delay_usec(1000);
-		thread_load_mutex.lock();
+		thread_load_lock.temp_relock();
 	}
 	}
 
 
 	while (user_load_tokens.begin()) {
 	while (user_load_tokens.begin()) {
@@ -1244,7 +1244,6 @@ void ResourceLoader::clear_thread_load_tasks() {
 	thread_load_tasks.clear();
 	thread_load_tasks.clear();
 
 
 	cleaning_tasks = false;
 	cleaning_tasks = false;
-	thread_load_mutex.unlock();
 }
 }
 
 
 void ResourceLoader::load_path_remaps() {
 void ResourceLoader::load_path_remaps() {

+ 15 - 2
core/os/mutex.h

@@ -72,7 +72,7 @@ public:
 
 
 template <typename MutexT>
 template <typename MutexT>
 class MutexLock {
 class MutexLock {
-	THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
+	mutable THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
 
 
 public:
 public:
 	explicit MutexLock(const MutexT &p_mutex) :
 	explicit MutexLock(const MutexT &p_mutex) :
@@ -82,8 +82,18 @@ public:
 	template <typename T = MutexT>
 	template <typename T = MutexT>
 	_ALWAYS_INLINE_ THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &_get_lock(
 	_ALWAYS_INLINE_ THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &_get_lock(
 			typename std::enable_if<std::is_same<T, THREADING_NAMESPACE::mutex>::value> * = nullptr) const {
 			typename std::enable_if<std::is_same<T, THREADING_NAMESPACE::mutex>::value> * = nullptr) const {
-		return const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(lock);
+		return lock;
 	}
 	}
+
+	_ALWAYS_INLINE_ void temp_relock() const {
+		lock.lock();
+	}
+
+	_ALWAYS_INLINE_ void temp_unlock() const {
+		lock.unlock();
+	}
+
+	// TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
 };
 };
 
 
 using Mutex = MutexImpl<THREADING_NAMESPACE::recursive_mutex>; // Recursive, for general use
 using Mutex = MutexImpl<THREADING_NAMESPACE::recursive_mutex>; // Recursive, for general use
@@ -109,6 +119,9 @@ template <typename MutexT>
 class MutexLock {
 class MutexLock {
 public:
 public:
 	MutexLock(const MutexT &p_mutex) {}
 	MutexLock(const MutexT &p_mutex) {}
+
+	void temp_relock() const {}
+	void temp_unlock() const {}
 };
 };
 
 
 using Mutex = MutexImpl;
 using Mutex = MutexImpl;

+ 20 - 0
core/os/safe_binary_mutex.h

@@ -108,6 +108,16 @@ public:
 	~MutexLock() {
 	~MutexLock() {
 		mutex.unlock();
 		mutex.unlock();
 	}
 	}
+
+	_ALWAYS_INLINE_ void temp_relock() const {
+		mutex.lock();
+	}
+
+	_ALWAYS_INLINE_ void temp_unlock() const {
+		mutex.unlock();
+	}
+
+	// TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
 };
 };
 
 
 #ifdef __clang__
 #ifdef __clang__
@@ -128,6 +138,16 @@ public:
 	void unlock() const {}
 	void unlock() const {}
 };
 };
 
 
+template <int Tag>
+class MutexLock<SafeBinaryMutex<Tag>> {
+public:
+	MutexLock(const SafeBinaryMutex<Tag> &p_mutex) {}
+	~MutexLock() {}
+
+	void temp_relock() const {}
+	void temp_unlock() const {}
+};
+
 #endif // THREADS_ENABLED
 #endif // THREADS_ENABLED
 
 
 #endif // SAFE_BINARY_MUTEX_H
 #endif // SAFE_BINARY_MUTEX_H