
Rewrite `Atomic_RW_Mutex`

This patch simplifies the implementation and fixes #5254.

Previously, the mutex was set up as if there could be multiple writers,
and there seemed to be some confusion as to which `Writer` bits to
check, as not all were checked or set at the same time.

This could also result in the mutex being left in a non-zero state even
after unlocking all locks.

All unneeded state has been removed and extra checks have been put in
place.
Feoramund, 3 months ago
commit 8cde9dce47
1 changed file with 42 additions and 18 deletions

core/sync/primitives_atomic.odin  (+42, -18)

@@ -95,20 +95,16 @@ atomic_mutex_guard :: proc "contextless" (m: ^Atomic_Mutex) -> bool {
 
 
 Atomic_RW_Mutex_State :: distinct uint
-Atomic_RW_Mutex_State_Half_Width :: size_of(Atomic_RW_Mutex_State)*8/2
-Atomic_RW_Mutex_State_Is_Writing :: Atomic_RW_Mutex_State(1)
-Atomic_RW_Mutex_State_Writer     :: Atomic_RW_Mutex_State(1)<<1
-Atomic_RW_Mutex_State_Reader     :: Atomic_RW_Mutex_State(1)<<Atomic_RW_Mutex_State_Half_Width
+Atomic_RW_Mutex_State_Is_Writing  :: Atomic_RW_Mutex_State(1) << (size_of(Atomic_RW_Mutex_State)*8-1)
+Atomic_RW_Mutex_State_Reader      :: Atomic_RW_Mutex_State(1)
+Atomic_RW_Mutex_State_Reader_Mask :: ~Atomic_RW_Mutex_State_Is_Writing
 
-Atomic_RW_Mutex_State_Writer_Mask :: Atomic_RW_Mutex_State(1<<(Atomic_RW_Mutex_State_Half_Width-1) - 1) << 1
-Atomic_RW_Mutex_State_Reader_Mask :: Atomic_RW_Mutex_State(1<<(Atomic_RW_Mutex_State_Half_Width-1) - 1) << Atomic_RW_Mutex_State_Half_Width
 
-
-// An Atomic_RW_Mutex is a reader/writer mutual exclusion lock
-// The lock can be held by any arbitrary number of readers or a single writer
-// The zero value for an Atomic_RW_Mutex is an unlocked mutex
+// An Atomic_RW_Mutex is a reader/writer mutual exclusion lock.
+// The lock can be held by any arbitrary number of readers or a single writer.
+// The zero value for an Atomic_RW_Mutex is an unlocked mutex.
 //
-// An Atomic_RW_Mutex must not be copied after first use
+// An Atomic_RW_Mutex must not be copied after first use.
 Atomic_RW_Mutex :: struct #no_copy {
 	state: Atomic_RW_Mutex_State,
 	mutex: Atomic_Mutex,
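
To make the new layout concrete, here is a minimal standalone Odin sketch of how the state word now packs: the top bit flags the single writer and every remaining bit counts readers, so a fully unlocked mutex is exactly zero. The `State` names below are illustrative stand-ins for the `Atomic_RW_Mutex_State` constants above, not part of `core:sync`.

package rw_state_sketch

import "core:fmt"

// Illustrative mirror of the new constants; `State` stands in for
// Atomic_RW_Mutex_State and is not part of core:sync.
State :: distinct uint
State_Is_Writing  :: State(1) << (size_of(State)*8 - 1) // single writer flag in the top bit
State_Reader      :: State(1)                           // each shared lock adds one
State_Reader_Mask :: ~State_Is_Writing                  // every remaining bit counts readers

main :: proc() {
	// Three readers plus a writer flagged as waiting:
	state := State_Is_Writing + 3*State_Reader

	fmt.println("writer flagged:", state & State_Is_Writing != 0)   // true
	fmt.println("reader count:  ", uint(state & State_Reader_Mask)) // 3

	// Releasing everything brings the word back to exactly zero,
	// the invariant the old multi-writer layout could leave broken.
	state -= State_Is_Writing + 3*State_Reader
	fmt.println("unlocked state:", uint(state)) // 0
}
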
@@ -118,11 +114,17 @@ Atomic_RW_Mutex :: struct #no_copy {
 // atomic_rw_mutex_lock locks rw for writing (with a single writer)
 // If the mutex is already locked for reading or writing, the mutex blocks until the mutex is available.
 atomic_rw_mutex_lock :: proc "contextless" (rw: ^Atomic_RW_Mutex) {
-	_ = atomic_add(&rw.state, Atomic_RW_Mutex_State_Writer)
 	atomic_mutex_lock(&rw.mutex)
 
-	state := atomic_or(&rw.state, Atomic_RW_Mutex_State_Writer)
+	state := atomic_or(&rw.state, Atomic_RW_Mutex_State_Is_Writing)
 	if state & Atomic_RW_Mutex_State_Reader_Mask != 0 {
+		// There's at least one reader, so wait for the last one to post the semaphore.
+		//
+		// Because we hold the exclusive lock, no more readers can come in
+		// during this time, which will prevent any situations where the last
+		// reader is pre-empted around the count turning zero, which would
+		// result in the potential for another reader to run amok after the
+		// other posts.
 		atomic_sema_wait(&rw.sema)
 	}
 }
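
As a usage sketch (assuming the `core:sync` package and a hypothetical shared counter), the exclusive bracket around the procedure above looks like this; the semaphore wait is what keeps the writer parked until the last reader leaves.

package rw_write_sketch

import "core:fmt"
import "core:sync"

counter: int
rw: sync.Atomic_RW_Mutex

bump :: proc() {
	// Blocks until no readers or other writer hold the mutex.
	sync.atomic_rw_mutex_lock(&rw)
	defer sync.atomic_rw_mutex_unlock(&rw)
	counter += 1
}

main :: proc() {
	bump()
	fmt.println("counter:", counter) // 1
}
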
@@ -138,10 +140,15 @@ atomic_rw_mutex_try_lock :: proc "contextless" (rw: ^Atomic_RW_Mutex) -> bool {
 	if atomic_mutex_try_lock(&rw.mutex) {
 		state := atomic_load(&rw.state)
 		if state & Atomic_RW_Mutex_State_Reader_Mask == 0 {
-			_ = atomic_or(&rw.state, Atomic_RW_Mutex_State_Is_Writing)
-			return true
+			// Compare-and-exchange for absolute certainty that no one has come in to read.
+			_, ok := atomic_compare_exchange_strong(&rw.state, state, state | Atomic_RW_Mutex_State_Is_Writing)
+			if ok {
+				return true
+			}
 		}
 
+		// A reader is active or came in while we have the lock, so we need to
+		// back out.
 		atomic_mutex_unlock(&rw.mutex)
 	}
 	return false
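
A sketch of the non-blocking pattern this enables, again assuming `core:sync`: attempt the exclusive lock and skip the work instead of waiting whenever a reader or writer is active.

package rw_try_sketch

import "core:fmt"
import "core:sync"

rw: sync.Atomic_RW_Mutex

main :: proc() {
	if sync.atomic_rw_mutex_try_lock(&rw) {
		defer sync.atomic_rw_mutex_unlock(&rw)
		fmt.println("got the exclusive lock")
	} else {
		fmt.println("a reader or writer was active; skipping")
	}
}
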
@@ -150,16 +157,22 @@ atomic_rw_mutex_try_lock :: proc "contextless" (rw: ^Atomic_RW_Mutex) -> bool {
 // atomic_rw_mutex_shared_lock locks rw for reading (with arbitrary number of readers)
 atomic_rw_mutex_shared_lock :: proc "contextless" (rw: ^Atomic_RW_Mutex) {
 	state := atomic_load(&rw.state)
-	for state & (Atomic_RW_Mutex_State_Is_Writing|Atomic_RW_Mutex_State_Writer_Mask) == 0 {
+	for state & Atomic_RW_Mutex_State_Is_Writing == 0 {
 		ok: bool
 		state, ok = atomic_compare_exchange_weak(&rw.state, state, state + Atomic_RW_Mutex_State_Reader)
 		if ok {
+			// We successfully took the shared reader lock without any writers intervening.
 			return
 		}
 	}
 
+	// A writer is active or came in while we were trying to get a shared
+	// reader lock, so now we must take the full lock in order to wait for the
+	// writer to give it up.
 	atomic_mutex_lock(&rw.mutex)
+	// At this point, we have the lock, so we can add to the reader count.
 	_ = atomic_add(&rw.state, Atomic_RW_Mutex_State_Reader)
+	// Then we give up the lock to let other readers (or writers) come through.
 	atomic_mutex_unlock(&rw.mutex)
 }
 
@@ -169,6 +182,8 @@ atomic_rw_mutex_shared_unlock :: proc "contextless" (rw: ^Atomic_RW_Mutex) {
 
 	if (state & Atomic_RW_Mutex_State_Reader_Mask == Atomic_RW_Mutex_State_Reader) &&
 	   (state & Atomic_RW_Mutex_State_Is_Writing != 0) {
+	   // We were the last reader, so post to the writer with the lock who's
+	   // waiting to continue.
 		atomic_sema_post(&rw.sema)
 	}
 }
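
Finally, a sketch of the shared bracket, assuming `core:sync` and a hypothetical read-only table: any number of these sections may overlap, and per the unlock path above, only the last reader out posts the semaphore for a waiting writer.

package rw_shared_sketch

import "core:fmt"
import "core:sync"

table := [3]int{1, 2, 3}
rw: sync.Atomic_RW_Mutex

sum_readonly :: proc() -> (total: int) {
	// Any number of threads may hold the shared lock at once.
	sync.atomic_rw_mutex_shared_lock(&rw)
	defer sync.atomic_rw_mutex_shared_unlock(&rw)
	for v in table {
		total += v
	}
	return
}

main :: proc() {
	fmt.println("sum:", sum_readonly()) // 6
}
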
@@ -176,12 +191,21 @@ atomic_rw_mutex_shared_unlock :: proc "contextless" (rw: ^Atomic_RW_Mutex) {
 // atomic_rw_mutex_try_shared_lock tries to lock rw for reading (with arbitrary number of readers)
 atomic_rw_mutex_try_shared_lock :: proc "contextless" (rw: ^Atomic_RW_Mutex) -> bool {
 	state := atomic_load(&rw.state)
-	if state & (Atomic_RW_Mutex_State_Is_Writing|Atomic_RW_Mutex_State_Writer_Mask) == 0 {
-		_, ok := atomic_compare_exchange_strong(&rw.state, state, state + Atomic_RW_Mutex_State_Reader)
+	// NOTE: We need to check this in a for loop, because it is possible for
+	// another reader to change the underlying state which would cause our
+	// compare-and-exchange to fail.
+	for state & (Atomic_RW_Mutex_State_Is_Writing) == 0 {
+		ok: bool
+		state, ok = atomic_compare_exchange_weak(&rw.state, state, state + Atomic_RW_Mutex_State_Reader)
 		if ok {
 			return true
 		}
 	}
+	// A writer is active or came in during our lock attempt.
+
+	// We try to take the full lock, and if that succeeds (perhaps because the
+	// writer finished during the time since we failed our CAS), we increment
+	// the reader count and head on.
 	if atomic_mutex_try_lock(&rw.mutex) {
 		_ = atomic_add(&rw.state, Atomic_RW_Mutex_State_Reader)
 		atomic_mutex_unlock(&rw.mutex)