
`sync2.Auto_Reset_Event`; Make atomic operations names clearer

gingerBill, 4 years ago
commit 502ad0c10b

core/sync/sync2/atomic.odin  (+41, -43)

@@ -2,78 +2,76 @@ package sync2
 
 
 import "intrinsics"

-// TODO(bill): Is this even a good design? The intrinsics seem to be more than good enough and just as clean
-
 cpu_relax :: intrinsics.cpu_relax;

-atomic_fence        :: intrinsics.atomic_fence;
-atomic_fence_acq    :: intrinsics.atomic_fence_acq;
-atomic_fence_rel    :: intrinsics.atomic_fence_rel;
-atomic_fence_acqrel :: intrinsics.atomic_fence_acqrel;
+atomic_fence         :: intrinsics.atomic_fence;
+atomic_fence_acquire :: intrinsics.atomic_fence_acq;
+atomic_fence_release :: intrinsics.atomic_fence_rel;
+atomic_fence_acqrel  :: intrinsics.atomic_fence_acqrel;

 atomic_store           :: intrinsics.atomic_store;
-atomic_store_rel       :: intrinsics.atomic_store_rel;
+atomic_store_release   :: intrinsics.atomic_store_rel;
 atomic_store_relaxed   :: intrinsics.atomic_store_relaxed;
 atomic_store_unordered :: intrinsics.atomic_store_unordered;

 atomic_load           :: intrinsics.atomic_load;
-atomic_load_acq       :: intrinsics.atomic_load_acq;
+atomic_load_acquire   :: intrinsics.atomic_load_acq;
 atomic_load_relaxed   :: intrinsics.atomic_load_relaxed;
 atomic_load_unordered :: intrinsics.atomic_load_unordered;

 atomic_add          :: intrinsics.atomic_add;
-atomic_add_acq      :: intrinsics.atomic_add_acq;
-atomic_add_rel      :: intrinsics.atomic_add_rel;
+atomic_add_acquire  :: intrinsics.atomic_add_acq;
+atomic_add_release  :: intrinsics.atomic_add_rel;
 atomic_add_acqrel   :: intrinsics.atomic_add_acqrel;
 atomic_add_relaxed  :: intrinsics.atomic_add_relaxed;
 atomic_sub          :: intrinsics.atomic_sub;
-atomic_sub_acq      :: intrinsics.atomic_sub_acq;
-atomic_sub_rel      :: intrinsics.atomic_sub_rel;
+atomic_sub_acquire  :: intrinsics.atomic_sub_acq;
+atomic_sub_release  :: intrinsics.atomic_sub_rel;
 atomic_sub_acqrel   :: intrinsics.atomic_sub_acqrel;
 atomic_sub_relaxed  :: intrinsics.atomic_sub_relaxed;
 atomic_and          :: intrinsics.atomic_and;
-atomic_and_acq      :: intrinsics.atomic_and_acq;
-atomic_and_rel      :: intrinsics.atomic_and_rel;
+atomic_and_acquire  :: intrinsics.atomic_and_acq;
+atomic_and_release  :: intrinsics.atomic_and_rel;
 atomic_and_acqrel   :: intrinsics.atomic_and_acqrel;
 atomic_and_relaxed  :: intrinsics.atomic_and_relaxed;
 atomic_nand         :: intrinsics.atomic_nand;
-atomic_nand_acq     :: intrinsics.atomic_nand_acq;
-atomic_nand_rel     :: intrinsics.atomic_nand_rel;
+atomic_nand_acquire :: intrinsics.atomic_nand_acq;
+atomic_nand_release :: intrinsics.atomic_nand_rel;
 atomic_nand_acqrel  :: intrinsics.atomic_nand_acqrel;
 atomic_nand_relaxed :: intrinsics.atomic_nand_relaxed;
 atomic_or           :: intrinsics.atomic_or;
-atomic_or_acq       :: intrinsics.atomic_or_acq;
-atomic_or_rel       :: intrinsics.atomic_or_rel;
+atomic_or_acquire   :: intrinsics.atomic_or_acq;
+atomic_or_release   :: intrinsics.atomic_or_rel;
 atomic_or_acqrel    :: intrinsics.atomic_or_acqrel;
 atomic_or_relaxed   :: intrinsics.atomic_or_relaxed;
 atomic_xor          :: intrinsics.atomic_xor;
-atomic_xor_acq      :: intrinsics.atomic_xor_acq;
-atomic_xor_rel      :: intrinsics.atomic_xor_rel;
+atomic_xor_acquire  :: intrinsics.atomic_xor_acq;
+atomic_xor_release  :: intrinsics.atomic_xor_rel;
 atomic_xor_acqrel   :: intrinsics.atomic_xor_acqrel;
 atomic_xor_relaxed  :: intrinsics.atomic_xor_relaxed;
 
 
-atomic_xchg         :: intrinsics.atomic_xchg;
-atomic_xchg_acq     :: intrinsics.atomic_xchg_acq;
-atomic_xchg_rel     :: intrinsics.atomic_xchg_rel;
-atomic_xchg_acqrel  :: intrinsics.atomic_xchg_acqrel;
-atomic_xchg_relaxed :: intrinsics.atomic_xchg_relaxed;
+atomic_exchange         :: intrinsics.atomic_xchg;
+atomic_exchange_acquire :: intrinsics.atomic_xchg_acq;
+atomic_exchange_release :: intrinsics.atomic_xchg_rel;
+atomic_exchange_acqrel  :: intrinsics.atomic_xchg_acqrel;
+atomic_exchange_relaxed :: intrinsics.atomic_xchg_relaxed;
 
 
-atomic_cxchg                    :: intrinsics.atomic_cxchg;
-atomic_cxchg_acq                :: intrinsics.atomic_cxchg_acq;
-atomic_cxchg_rel                :: intrinsics.atomic_cxchg_rel;
-atomic_cxchg_acqrel             :: intrinsics.atomic_cxchg_acqrel;
-atomic_cxchg_relaxed            :: intrinsics.atomic_cxchg_relaxed;
-atomic_cxchg_failrelaxed        :: intrinsics.atomic_cxchg_failrelaxed;
-atomic_cxchg_failacq            :: intrinsics.atomic_cxchg_failacq;
-atomic_cxchg_acq_failrelaxed    :: intrinsics.atomic_cxchg_acq_failrelaxed;
-atomic_cxchg_acqrel_failrelaxed :: intrinsics.atomic_cxchg_acqrel_failrelaxed;
+atomic_compare_exchange_strong                     :: intrinsics.atomic_cxchg;
+atomic_compare_exchange_strong_acquire             :: intrinsics.atomic_cxchg_acq;
+atomic_compare_exchange_strong_release             :: intrinsics.atomic_cxchg_rel;
+atomic_compare_exchange_strong_acqrel              :: intrinsics.atomic_cxchg_acqrel;
+atomic_compare_exchange_strong_relaxed             :: intrinsics.atomic_cxchg_relaxed;
+atomic_compare_exchange_strong_failrelaxed         :: intrinsics.atomic_cxchg_failrelaxed;
+atomic_compare_exchange_strong_failacquire         :: intrinsics.atomic_cxchg_failacq;
+atomic_compare_exchange_strong_acquire_failrelaxed :: intrinsics.atomic_cxchg_acq_failrelaxed;
+atomic_compare_exchange_strong_acqrel_failrelaxed  :: intrinsics.atomic_cxchg_acqrel_failrelaxed;
 
 
-atomic_cxchgweak                    :: intrinsics.atomic_cxchgweak;
-atomic_cxchgweak_acq                :: intrinsics.atomic_cxchgweak_acq;
-atomic_cxchgweak_rel                :: intrinsics.atomic_cxchgweak_rel;
-atomic_cxchgweak_acqrel             :: intrinsics.atomic_cxchgweak_acqrel;
-atomic_cxchgweak_relaxed            :: intrinsics.atomic_cxchgweak_relaxed;
-atomic_cxchgweak_failrelaxed        :: intrinsics.atomic_cxchgweak_failrelaxed;
-atomic_cxchgweak_failacq            :: intrinsics.atomic_cxchgweak_failacq;
-atomic_cxchgweak_acq_failrelaxed    :: intrinsics.atomic_cxchgweak_acq_failrelaxed;
-atomic_cxchgweak_acqrel_failrelaxed :: intrinsics.atomic_cxchgweak_acqrel_failrelaxed;
+atomic_compare_exchange_weak                     :: intrinsics.atomic_cxchgweak;
+atomic_compare_exchange_weak_acquire             :: intrinsics.atomic_cxchgweak_acq;
+atomic_compare_exchange_weak_release             :: intrinsics.atomic_cxchgweak_rel;
+atomic_compare_exchange_weak_acqrel              :: intrinsics.atomic_cxchgweak_acqrel;
+atomic_compare_exchange_weak_relaxed             :: intrinsics.atomic_cxchgweak_relaxed;
+atomic_compare_exchange_weak_failrelaxed         :: intrinsics.atomic_cxchgweak_failrelaxed;
+atomic_compare_exchange_weak_failacquire         :: intrinsics.atomic_cxchgweak_failacq;
+atomic_compare_exchange_weak_acquire_failrelaxed :: intrinsics.atomic_cxchgweak_acq_failrelaxed;
+atomic_compare_exchange_weak_acqrel_failrelaxed  :: intrinsics.atomic_cxchgweak_acqrel_failrelaxed;
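
The renamed procedures are plain `::` aliases of the same intrinsics, so call sites change only in spelling. A minimal sketch of the new names in use (the `flag` variable and the two procedures are illustrative, not part of this commit):

	package example

	import "core:sync/sync2"

	flag: i32;

	publish :: proc() {
		// release store: everything written before this store is visible
		// to a thread that later observes flag == 1 with an acquire load
		sync2.atomic_store_release(&flag, 1);
	}

	is_published :: proc() -> bool {
		return sync2.atomic_load_acquire(&flag) == 1;
	}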

core/sync/sync2/extended.odin  (+42, -12)

@@ -122,6 +122,36 @@ barrier_wait :: proc(b: ^Barrier) -> (is_leader: bool) {
 }

+Auto_Reset_Event :: struct {
+	// status ==  0: Event is reset and no threads are waiting
+	// status ==  1: Event is signaled
+	// status == -N: Event is reset and N threads are waiting
+	status: i32,
+	sema:   Sema,
+}
+
+auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
+	old_status := atomic_load_relaxed(&e.status);
+	for {
+		new_status := old_status + 1 if old_status < 1 else 1;
+		if _, ok := atomic_compare_exchange_weak_release(&e.status, old_status, new_status); ok {
+			break;
+		}
+
+		if old_status < 0 {
+			sema_post(&e.sema);
+		}
+	}
+}
+
+auto_reset_event_wait :: proc(e: ^Auto_Reset_Event) {
+	old_status := atomic_sub_acquire(&e.status, 1);
+	if old_status < 1 {
+		sema_wait(&e.sema);
+	}
+}
+
+
 
 
 Ticket_Mutex :: struct {
 	ticket:  uint,
@@ -130,7 +160,7 @@ Ticket_Mutex :: struct {
 
 
 ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
 	ticket := atomic_add_relaxed(&m.ticket, 1);
-	for ticket != atomic_load_acq(&m.serving) {
+	for ticket != atomic_load_acquire(&m.serving) {
 		cpu_relax();
 	}
 }
@@ -142,23 +172,23 @@ ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {


 Benaphore :: struct {
-	counter: int,
+	counter: i32,
 	sema:    Sema,
 }

 benaphore_lock :: proc(b: ^Benaphore) {
-	if atomic_add_acq(&b.counter, 1) > 1 {
+	if atomic_add_acquire(&b.counter, 1) > 1 {
 		sema_wait(&b.sema);
 	}
 }

 benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
-	v, _ := atomic_cxchg_acq(&b.counter, 1, 0);
+	v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0);
 	return v == 0;
 }

 benaphore_unlock :: proc(b: ^Benaphore) {
-	if atomic_sub_rel(&b.counter, 1) > 0 {
+	if atomic_sub_release(&b.counter, 1) > 0 {
 		sema_post(&b.sema);
 	}
 }
@@ -166,13 +196,13 @@ benaphore_unlock :: proc(b: ^Benaphore) {
 Recursive_Benaphore :: struct {
 	counter:   int,
 	owner:     int,
-	recursion: int,
+	recursion: i32,
 	sema:      Sema,
 }

 recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
 	tid := runtime.current_thread_id();
-	if atomic_add_acq(&b.counter, 1) > 1 {
+	if atomic_add_acquire(&b.counter, 1) > 1 {
 		if tid != b.owner {
 			sema_wait(&b.sema);
 		}
@@ -185,10 +215,10 @@ recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
 recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
 	tid := runtime.current_thread_id();
 	if b.owner == tid {
-		atomic_add_acq(&b.counter, 1);
+		atomic_add_acquire(&b.counter, 1);
 	}

-	if v, _ := atomic_cxchg_acq(&b.counter, 1, 0); v != 0 {
+	if v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0); v != 0 {
 		return false;
 	}
 	// inside the lock
@@ -205,7 +235,7 @@ recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
 	if recursion == 0 {
 		b.owner = 0;
 	}
-	if atomic_sub_rel(&b.counter, 1) > 0 {
+	if atomic_sub_release(&b.counter, 1) > 0 {
 		if recursion == 0 {
 			sema_post(&b.sema);
 		}
@@ -223,7 +253,7 @@ Once :: struct {
 }

 once_do :: proc(o: ^Once, fn: proc()) {
-	if atomic_load_acq(&o.done) == false {
+	if atomic_load_acquire(&o.done) == false {
 		_once_do_slow(o, fn);
 	}
 }
@@ -234,6 +264,6 @@ _once_do_slow :: proc(o: ^Once, fn: proc()) {
 	defer mutex_unlock(&o.m);
 	if !o.done {
 		fn();
-		atomic_store_rel(&o.done, true);
+		atomic_store_release(&o.done, true);
 	}
 }
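
A rough usage sketch for the new Auto_Reset_Event: per its status comments, a signal releases one waiting thread, and at most one pending signal is remembered while nobody waits. The `event`, `submit_work`, and `worker` names are illustrative, assuming the same package/import setup as the earlier sketch:

	event: sync2.Auto_Reset_Event;

	submit_work :: proc() {
		// ... publish a work item somewhere visible to the worker ...
		sync2.auto_reset_event_signal(&event); // wake at most one waiting worker
	}

	worker :: proc() {
		for {
			sync2.auto_reset_event_wait(&event); // returns immediately if a signal is pending
			// ... consume the work item ...
		}
	}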

core/sync/sync2/primitives.odin  (+49, -0)

@@ -25,6 +25,18 @@ mutex_try_lock :: proc(m: ^Mutex) -> bool {
 	return _mutex_try_lock(m);
 }

+// Example:
+//
+// if mutex_guard(&m) {
+//         ...
+// }
+//
+@(deferred_in=mutex_unlock)
+mutex_guard :: proc(m: ^Mutex) -> bool {
+	mutex_lock(m);
+	return true;
+}
+
 // A RW_Mutex is a reader/writer mutual exclusion lock
 // The lock can be held by any arbitrary number of readers or a single writer
 // The zero value for a RW_Mutex is an unlocked mutex
@@ -65,6 +77,31 @@ rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
 	return _rw_mutex_try_shared_lock(rw);
 }

+// Example:
+//
+// if rw_mutex_guard(&m) {
+//         ...
+// }
+//
+@(deferred_in=rw_mutex_unlock)
+rw_mutex_guard :: proc(m: ^RW_Mutex) -> bool {
+	rw_mutex_lock(m);
+	return true;
+}
+
+// Example:
+//
+// if rw_mutex_shared_guard(&m) {
+//         ...
+// }
+//
+@(deferred_in=rw_mutex_shared_unlock)
+rw_mutex_shared_guard :: proc(m: ^RW_Mutex) -> bool {
+	rw_mutex_shared_lock(m);
+	return true;
+}
+
+
 
 
 // A Recusrive_Mutex is a recursive mutual exclusion lock
 // The zero value for a Recursive_Mutex is an unlocked mutex
@@ -87,6 +124,18 @@ recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
 }

+// Example:
+//
+// if recursive_mutex_guard(&m) {
+//         ...
+// }
+//
+@(deferred_in=recursive_mutex_unlock)
+recursive_mutex_guard :: proc(m: ^Recursive_Mutex) -> bool {
+	recursive_mutex_lock(m);
+	return true;
+}
+
 
 
 // Cond implements a condition variable, a rendezvous point for threads
 // waiting for signalling the occurence of an event
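
The new guard procedures always return true and use @(deferred_in=...) to schedule the matching unlock for the end of the enclosing block, so an if statement can serve as a scoped critical section, as the commit's own example comments suggest. A small sketch, with the `m` and `counter` names being illustrative:

	m: sync2.Mutex;
	counter: int;

	increment :: proc() {
		if sync2.mutex_guard(&m) {
			// mutex_lock ran inside the guard call; the deferred_in attribute
			// arranges for mutex_unlock(&m) to run when this if-block ends
			counter += 1;
		}
	}

The same pattern applies to rw_mutex_guard, rw_mutex_shared_guard, and recursive_mutex_guard.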

core/sync/sync2/primitives_pthreads.odin  (+2, -2)

@@ -84,7 +84,7 @@ _rw_mutex_shared_lock :: proc(rw: ^RW_Mutex) {
 	state := atomic_load(&rw.impl.state);
 	for state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
 		ok: bool;
-		state, ok = atomic_cxchgweak(&rw.impl.state, state, state + RW_Mutex_State_Reader);
+		state, ok = atomic_compare_exchange_weak(&rw.impl.state, state, state + RW_Mutex_State_Reader);
 		if ok {
 			return;
 		}
@@ -107,7 +107,7 @@ _rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
 _rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
 	state := atomic_load(&rw.impl.state);
 	if state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
-		_, ok := atomic_cxchg(&rw.impl.state, state, state + RW_Mutex_State_Reader);
+		_, ok := atomic_compare_exchange_strong(&rw.impl.state, state, state + RW_Mutex_State_Reader);
 		if ok {
 			return true;
 		}
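
The pthreads RW_Mutex code above also shows the two-value convention the compare-exchange procedures keep from the intrinsics: they return the value actually found in memory plus an ok flag, so a failed attempt feeds straight into the next retry. A generic sketch of that pattern under the new names (the `state` variable, `LIMIT` constant, and `try_increment` procedure are illustrative):

	LIMIT :: 16;
	state: int;

	try_increment :: proc() -> bool {
		old := sync2.atomic_load(&state);
		for old < LIMIT {
			ok: bool;
			// on failure, `old` receives the current value and the loop retries
			old, ok = sync2.atomic_compare_exchange_weak(&state, old, old+1);
			if ok {
				return true;
			}
		}
		return false;
	}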

core/sync/sync2/primitives_windows.odin  (+4, -4)

@@ -58,7 +58,7 @@ _Recursive_Mutex :: struct {
 _recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
 	tid := win32.GetCurrentThreadId();
 	for {
-		prev_owner := atomic_cxchg_acq(&m.impl.owner, tid, 0);
+		prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0);
 		switch prev_owner {
 		case 0, tid:
 			m.impl.claim_count += 1;
@@ -80,7 +80,7 @@ _recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
 	if m.impl.claim_count != 0 {
 		return;
 	}
-	atomic_xchg_rel(&m.impl.owner, 0);
+	atomic_exchange_release(&m.impl.owner, 0);
 	win32.WakeByAddressSingle(&m.impl.owner);
 	// outside the lock

@@ -88,7 +88,7 @@ _recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
 
 
 _recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
 	tid := win32.GetCurrentThreadId();
-	prev_owner := atomic_cxchg_acq(&m.impl.owner, tid, 0);
+	prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0);
 	switch prev_owner {
 	case 0, tid:
 		m.impl.claim_count += 1;
@@ -139,7 +139,7 @@ _sema_wait :: proc(s: ^Sema) {
 			);
 			original_count = s.impl.count;
 		}
-		if original_count == atomic_cxchg(&s.impl.count, original_count-1, original_count) {
+		if original_count == atomic_compare_exchange_strong(&s.impl.count, original_count-1, original_count) {
 			return;
 		}
 	}
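
The exchange rename follows the same acquire/release spelling as the rest. For completeness, a tiny spinlock sketch using the new names; the Spinlock type and its procedures are illustrative only and not part of core:sync, assuming the same package/import setup as the earlier sketches:

	Spinlock :: struct {
		flag: i32,
	}

	spinlock_lock :: proc(l: ^Spinlock) {
		// keep swapping 1 in until the previous value was 0, i.e. we took the lock
		for sync2.atomic_exchange_acquire(&l.flag, 1) != 0 {
			sync2.cpu_relax();
		}
	}

	spinlock_unlock :: proc(l: ^Spinlock) {
		sync2.atomic_store_release(&l.flag, 0);
	}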